merge: introduce mergeresult.addfile() and use it...
Pulkit Goyal
r45839:b442920a default
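
This changeset replaces direct assignments into the mresult.actions dict in the largefiles overrides with the new mergeresult.addfile() helper. Below is a minimal sketch of the shape of that API, assuming a simplified mergeresult class; the real class in mercurial/merge.py carries more state than this, so treat it only as an illustration of the calling convention used in the hunk that follows.

    # Hypothetical, simplified stand-in for mercurial.merge.mergeresult,
    # shown only to illustrate the addfile() calling convention.
    class mergeresult(object):
        def __init__(self):
            # filename -> (action, args, message), later consumed when
            # merge.applyupdates writes files into the working copy
            self.actions = {}

        def addfile(self, filename, action, data, message):
            # record a merge action instead of letting callers mutate
            # self.actions directly
            self.actions[filename] = (action, data, message)

    # before this change (removed lines in the hunk below):
    #     mresult.actions[lfile] = (b'r', None, b'replaced by standin')
    # after this change (added lines):
    #     mresult.addfile(lfile, b'r', None, b'replaced by standin')
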
@@ -1,1833 +1,1823 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18
18
19 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
20
20
21 from mercurial import (
21 from mercurial import (
22 archival,
22 archival,
23 cmdutil,
23 cmdutil,
24 copies as copiesmod,
24 copies as copiesmod,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 exthelper,
28 exthelper,
29 filemerge,
29 filemerge,
30 hg,
30 hg,
31 logcmdutil,
31 logcmdutil,
32 match as matchmod,
32 match as matchmod,
33 merge,
33 merge,
34 mergestate as mergestatemod,
34 mergestate as mergestatemod,
35 pathutil,
35 pathutil,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 smartset,
38 smartset,
39 subrepo,
39 subrepo,
40 upgrade,
40 upgrade,
41 url as urlmod,
41 url as urlmod,
42 util,
42 util,
43 )
43 )
44
44
45 from . import (
45 from . import (
46 lfcommands,
46 lfcommands,
47 lfutil,
47 lfutil,
48 storefactory,
48 storefactory,
49 )
49 )
50
50
51 eh = exthelper.exthelper()
51 eh = exthelper.exthelper()
52
52
53 lfstatus = lfutil.lfstatus
53 lfstatus = lfutil.lfstatus
54
54
55 # -- Utility functions: commonly/repeatedly needed functionality ---------------
55 # -- Utility functions: commonly/repeatedly needed functionality ---------------
56
56
57
57
58 def composelargefilematcher(match, manifest):
58 def composelargefilematcher(match, manifest):
59 '''create a matcher that matches only the largefiles in the original
59 '''create a matcher that matches only the largefiles in the original
60 matcher'''
60 matcher'''
61 m = copy.copy(match)
61 m = copy.copy(match)
62 lfile = lambda f: lfutil.standin(f) in manifest
62 lfile = lambda f: lfutil.standin(f) in manifest
63 m._files = [lf for lf in m._files if lfile(lf)]
63 m._files = [lf for lf in m._files if lfile(lf)]
64 m._fileset = set(m._files)
64 m._fileset = set(m._files)
65 m.always = lambda: False
65 m.always = lambda: False
66 origmatchfn = m.matchfn
66 origmatchfn = m.matchfn
67 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
67 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
68 return m
68 return m
69
69
70
70
71 def composenormalfilematcher(match, manifest, exclude=None):
71 def composenormalfilematcher(match, manifest, exclude=None):
72 excluded = set()
72 excluded = set()
73 if exclude is not None:
73 if exclude is not None:
74 excluded.update(exclude)
74 excluded.update(exclude)
75
75
76 m = copy.copy(match)
76 m = copy.copy(match)
77 notlfile = lambda f: not (
77 notlfile = lambda f: not (
78 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
78 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
79 )
79 )
80 m._files = [lf for lf in m._files if notlfile(lf)]
80 m._files = [lf for lf in m._files if notlfile(lf)]
81 m._fileset = set(m._files)
81 m._fileset = set(m._files)
82 m.always = lambda: False
82 m.always = lambda: False
83 origmatchfn = m.matchfn
83 origmatchfn = m.matchfn
84 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
84 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
85 return m
85 return m
86
86
87
87
88 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
88 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
89 large = opts.get('large')
89 large = opts.get('large')
90 lfsize = lfutil.getminsize(
90 lfsize = lfutil.getminsize(
91 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
91 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
92 )
92 )
93
93
94 lfmatcher = None
94 lfmatcher = None
95 if lfutil.islfilesrepo(repo):
95 if lfutil.islfilesrepo(repo):
96 lfpats = ui.configlist(lfutil.longname, b'patterns')
96 lfpats = ui.configlist(lfutil.longname, b'patterns')
97 if lfpats:
97 if lfpats:
98 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
98 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
99
99
100 lfnames = []
100 lfnames = []
101 m = matcher
101 m = matcher
102
102
103 wctx = repo[None]
103 wctx = repo[None]
104 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
104 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
105 exact = m.exact(f)
105 exact = m.exact(f)
106 lfile = lfutil.standin(f) in wctx
106 lfile = lfutil.standin(f) in wctx
107 nfile = f in wctx
107 nfile = f in wctx
108 exists = lfile or nfile
108 exists = lfile or nfile
109
109
110 # Don't warn the user when they attempt to add a normal tracked file.
110 # Don't warn the user when they attempt to add a normal tracked file.
111 # The normal add code will do that for us.
111 # The normal add code will do that for us.
112 if exact and exists:
112 if exact and exists:
113 if lfile:
113 if lfile:
114 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
114 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
115 continue
115 continue
116
116
117 if (exact or not exists) and not lfutil.isstandin(f):
117 if (exact or not exists) and not lfutil.isstandin(f):
118 # In case the file was removed previously, but not committed
118 # In case the file was removed previously, but not committed
119 # (issue3507)
119 # (issue3507)
120 if not repo.wvfs.exists(f):
120 if not repo.wvfs.exists(f):
121 continue
121 continue
122
122
123 abovemin = (
123 abovemin = (
124 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
124 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
125 )
125 )
126 if large or abovemin or (lfmatcher and lfmatcher(f)):
126 if large or abovemin or (lfmatcher and lfmatcher(f)):
127 lfnames.append(f)
127 lfnames.append(f)
128 if ui.verbose or not exact:
128 if ui.verbose or not exact:
129 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
129 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
130
130
131 bad = []
131 bad = []
132
132
133 # Need to lock, otherwise there could be a race condition between
133 # Need to lock, otherwise there could be a race condition between
134 # when standins are created and added to the repo.
134 # when standins are created and added to the repo.
135 with repo.wlock():
135 with repo.wlock():
136 if not opts.get('dry_run'):
136 if not opts.get('dry_run'):
137 standins = []
137 standins = []
138 lfdirstate = lfutil.openlfdirstate(ui, repo)
138 lfdirstate = lfutil.openlfdirstate(ui, repo)
139 for f in lfnames:
139 for f in lfnames:
140 standinname = lfutil.standin(f)
140 standinname = lfutil.standin(f)
141 lfutil.writestandin(
141 lfutil.writestandin(
142 repo,
142 repo,
143 standinname,
143 standinname,
144 hash=b'',
144 hash=b'',
145 executable=lfutil.getexecutable(repo.wjoin(f)),
145 executable=lfutil.getexecutable(repo.wjoin(f)),
146 )
146 )
147 standins.append(standinname)
147 standins.append(standinname)
148 if lfdirstate[f] == b'r':
148 if lfdirstate[f] == b'r':
149 lfdirstate.normallookup(f)
149 lfdirstate.normallookup(f)
150 else:
150 else:
151 lfdirstate.add(f)
151 lfdirstate.add(f)
152 lfdirstate.write()
152 lfdirstate.write()
153 bad += [
153 bad += [
154 lfutil.splitstandin(f)
154 lfutil.splitstandin(f)
155 for f in repo[None].add(standins)
155 for f in repo[None].add(standins)
156 if f in m.files()
156 if f in m.files()
157 ]
157 ]
158
158
159 added = [f for f in lfnames if f not in bad]
159 added = [f for f in lfnames if f not in bad]
160 return added, bad
160 return added, bad
161
161
162
162
163 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
163 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
164 after = opts.get('after')
164 after = opts.get('after')
165 m = composelargefilematcher(matcher, repo[None].manifest())
165 m = composelargefilematcher(matcher, repo[None].manifest())
166 with lfstatus(repo):
166 with lfstatus(repo):
167 s = repo.status(match=m, clean=not isaddremove)
167 s = repo.status(match=m, clean=not isaddremove)
168 manifest = repo[None].manifest()
168 manifest = repo[None].manifest()
169 modified, added, deleted, clean = [
169 modified, added, deleted, clean = [
170 [f for f in list if lfutil.standin(f) in manifest]
170 [f for f in list if lfutil.standin(f) in manifest]
171 for list in (s.modified, s.added, s.deleted, s.clean)
171 for list in (s.modified, s.added, s.deleted, s.clean)
172 ]
172 ]
173
173
174 def warn(files, msg):
174 def warn(files, msg):
175 for f in files:
175 for f in files:
176 ui.warn(msg % uipathfn(f))
176 ui.warn(msg % uipathfn(f))
177 return int(len(files) > 0)
177 return int(len(files) > 0)
178
178
179 if after:
179 if after:
180 remove = deleted
180 remove = deleted
181 result = warn(
181 result = warn(
182 modified + added + clean, _(b'not removing %s: file still exists\n')
182 modified + added + clean, _(b'not removing %s: file still exists\n')
183 )
183 )
184 else:
184 else:
185 remove = deleted + clean
185 remove = deleted + clean
186 result = warn(
186 result = warn(
187 modified,
187 modified,
188 _(
188 _(
189 b'not removing %s: file is modified (use -f'
189 b'not removing %s: file is modified (use -f'
190 b' to force removal)\n'
190 b' to force removal)\n'
191 ),
191 ),
192 )
192 )
193 result = (
193 result = (
194 warn(
194 warn(
195 added,
195 added,
196 _(
196 _(
197 b'not removing %s: file has been marked for add'
197 b'not removing %s: file has been marked for add'
198 b' (use forget to undo)\n'
198 b' (use forget to undo)\n'
199 ),
199 ),
200 )
200 )
201 or result
201 or result
202 )
202 )
203
203
204 # Need to lock because standin files are deleted then removed from the
204 # Need to lock because standin files are deleted then removed from the
205 # repository and we could race in-between.
205 # repository and we could race in-between.
206 with repo.wlock():
206 with repo.wlock():
207 lfdirstate = lfutil.openlfdirstate(ui, repo)
207 lfdirstate = lfutil.openlfdirstate(ui, repo)
208 for f in sorted(remove):
208 for f in sorted(remove):
209 if ui.verbose or not m.exact(f):
209 if ui.verbose or not m.exact(f):
210 ui.status(_(b'removing %s\n') % uipathfn(f))
210 ui.status(_(b'removing %s\n') % uipathfn(f))
211
211
212 if not dryrun:
212 if not dryrun:
213 if not after:
213 if not after:
214 repo.wvfs.unlinkpath(f, ignoremissing=True)
214 repo.wvfs.unlinkpath(f, ignoremissing=True)
215
215
216 if dryrun:
216 if dryrun:
217 return result
217 return result
218
218
219 remove = [lfutil.standin(f) for f in remove]
219 remove = [lfutil.standin(f) for f in remove]
220 # If this is being called by addremove, let the original addremove
220 # If this is being called by addremove, let the original addremove
221 # function handle this.
221 # function handle this.
222 if not isaddremove:
222 if not isaddremove:
223 for f in remove:
223 for f in remove:
224 repo.wvfs.unlinkpath(f, ignoremissing=True)
224 repo.wvfs.unlinkpath(f, ignoremissing=True)
225 repo[None].forget(remove)
225 repo[None].forget(remove)
226
226
227 for f in remove:
227 for f in remove:
228 lfutil.synclfdirstate(
228 lfutil.synclfdirstate(
229 repo, lfdirstate, lfutil.splitstandin(f), False
229 repo, lfdirstate, lfutil.splitstandin(f), False
230 )
230 )
231
231
232 lfdirstate.write()
232 lfdirstate.write()
233
233
234 return result
234 return result
235
235
236
236
237 # For overriding mercurial.hgweb.webcommands so that largefiles will
237 # For overriding mercurial.hgweb.webcommands so that largefiles will
238 # appear at their right place in the manifests.
238 # appear at their right place in the manifests.
239 @eh.wrapfunction(webcommands, b'decodepath')
239 @eh.wrapfunction(webcommands, b'decodepath')
240 def decodepath(orig, path):
240 def decodepath(orig, path):
241 return lfutil.splitstandin(path) or path
241 return lfutil.splitstandin(path) or path
242
242
243
243
244 # -- Wrappers: modify existing commands --------------------------------
244 # -- Wrappers: modify existing commands --------------------------------
245
245
246
246
247 @eh.wrapcommand(
247 @eh.wrapcommand(
248 b'add',
248 b'add',
249 opts=[
249 opts=[
250 (b'', b'large', None, _(b'add as largefile')),
250 (b'', b'large', None, _(b'add as largefile')),
251 (b'', b'normal', None, _(b'add as normal file')),
251 (b'', b'normal', None, _(b'add as normal file')),
252 (
252 (
253 b'',
253 b'',
254 b'lfsize',
254 b'lfsize',
255 b'',
255 b'',
256 _(
256 _(
257 b'add all files above this size (in megabytes) '
257 b'add all files above this size (in megabytes) '
258 b'as largefiles (default: 10)'
258 b'as largefiles (default: 10)'
259 ),
259 ),
260 ),
260 ),
261 ],
261 ],
262 )
262 )
263 def overrideadd(orig, ui, repo, *pats, **opts):
263 def overrideadd(orig, ui, repo, *pats, **opts):
264 if opts.get('normal') and opts.get('large'):
264 if opts.get('normal') and opts.get('large'):
265 raise error.Abort(_(b'--normal cannot be used with --large'))
265 raise error.Abort(_(b'--normal cannot be used with --large'))
266 return orig(ui, repo, *pats, **opts)
266 return orig(ui, repo, *pats, **opts)
267
267
268
268
269 @eh.wrapfunction(cmdutil, b'add')
269 @eh.wrapfunction(cmdutil, b'add')
270 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
270 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
271 # The --normal flag short circuits this override
271 # The --normal flag short circuits this override
272 if opts.get('normal'):
272 if opts.get('normal'):
273 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
273 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
274
274
275 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
275 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
276 normalmatcher = composenormalfilematcher(
276 normalmatcher = composenormalfilematcher(
277 matcher, repo[None].manifest(), ladded
277 matcher, repo[None].manifest(), ladded
278 )
278 )
279 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
279 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
280
280
281 bad.extend(f for f in lbad)
281 bad.extend(f for f in lbad)
282 return bad
282 return bad
283
283
284
284
285 @eh.wrapfunction(cmdutil, b'remove')
285 @eh.wrapfunction(cmdutil, b'remove')
286 def cmdutilremove(
286 def cmdutilremove(
287 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
287 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
288 ):
288 ):
289 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
289 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
290 result = orig(
290 result = orig(
291 ui,
291 ui,
292 repo,
292 repo,
293 normalmatcher,
293 normalmatcher,
294 prefix,
294 prefix,
295 uipathfn,
295 uipathfn,
296 after,
296 after,
297 force,
297 force,
298 subrepos,
298 subrepos,
299 dryrun,
299 dryrun,
300 )
300 )
301 return (
301 return (
302 removelargefiles(
302 removelargefiles(
303 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
303 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
304 )
304 )
305 or result
305 or result
306 )
306 )
307
307
308
308
309 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
309 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
310 def overridestatusfn(orig, repo, rev2, **opts):
310 def overridestatusfn(orig, repo, rev2, **opts):
311 with lfstatus(repo._repo):
311 with lfstatus(repo._repo):
312 return orig(repo, rev2, **opts)
312 return orig(repo, rev2, **opts)
313
313
314
314
315 @eh.wrapcommand(b'status')
315 @eh.wrapcommand(b'status')
316 def overridestatus(orig, ui, repo, *pats, **opts):
316 def overridestatus(orig, ui, repo, *pats, **opts):
317 with lfstatus(repo):
317 with lfstatus(repo):
318 return orig(ui, repo, *pats, **opts)
318 return orig(ui, repo, *pats, **opts)
319
319
320
320
321 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
321 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
322 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
322 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
323 with lfstatus(repo._repo):
323 with lfstatus(repo._repo):
324 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
324 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
325
325
326
326
327 @eh.wrapcommand(b'log')
327 @eh.wrapcommand(b'log')
328 def overridelog(orig, ui, repo, *pats, **opts):
328 def overridelog(orig, ui, repo, *pats, **opts):
329 def overridematchandpats(
329 def overridematchandpats(
330 orig,
330 orig,
331 ctx,
331 ctx,
332 pats=(),
332 pats=(),
333 opts=None,
333 opts=None,
334 globbed=False,
334 globbed=False,
335 default=b'relpath',
335 default=b'relpath',
336 badfn=None,
336 badfn=None,
337 ):
337 ):
338 """Matcher that merges root directory with .hglf, suitable for log.
338 """Matcher that merges root directory with .hglf, suitable for log.
339 It is still possible to match .hglf directly.
339 It is still possible to match .hglf directly.
340 For any listed files run log on the standin too.
340 For any listed files run log on the standin too.
341 matchfn tries both the given filename and with .hglf stripped.
341 matchfn tries both the given filename and with .hglf stripped.
342 """
342 """
343 if opts is None:
343 if opts is None:
344 opts = {}
344 opts = {}
345 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
345 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
346 m, p = copy.copy(matchandpats)
346 m, p = copy.copy(matchandpats)
347
347
348 if m.always():
348 if m.always():
349 # We want to match everything anyway, so there's no benefit trying
349 # We want to match everything anyway, so there's no benefit trying
350 # to add standins.
350 # to add standins.
351 return matchandpats
351 return matchandpats
352
352
353 pats = set(p)
353 pats = set(p)
354
354
355 def fixpats(pat, tostandin=lfutil.standin):
355 def fixpats(pat, tostandin=lfutil.standin):
356 if pat.startswith(b'set:'):
356 if pat.startswith(b'set:'):
357 return pat
357 return pat
358
358
359 kindpat = matchmod._patsplit(pat, None)
359 kindpat = matchmod._patsplit(pat, None)
360
360
361 if kindpat[0] is not None:
361 if kindpat[0] is not None:
362 return kindpat[0] + b':' + tostandin(kindpat[1])
362 return kindpat[0] + b':' + tostandin(kindpat[1])
363 return tostandin(kindpat[1])
363 return tostandin(kindpat[1])
364
364
365 cwd = repo.getcwd()
365 cwd = repo.getcwd()
366 if cwd:
366 if cwd:
367 hglf = lfutil.shortname
367 hglf = lfutil.shortname
368 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
368 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
369
369
370 def tostandin(f):
370 def tostandin(f):
371 # The file may already be a standin, so truncate the back
371 # The file may already be a standin, so truncate the back
372 # prefix and test before mangling it. This avoids turning
372 # prefix and test before mangling it. This avoids turning
373 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
373 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
374 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
374 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
375 return f
375 return f
376
376
377 # An absolute path is from outside the repo, so truncate the
377 # An absolute path is from outside the repo, so truncate the
378 # path to the root before building the standin. Otherwise cwd
378 # path to the root before building the standin. Otherwise cwd
379 # is somewhere in the repo, relative to root, and needs to be
379 # is somewhere in the repo, relative to root, and needs to be
380 # prepended before building the standin.
380 # prepended before building the standin.
381 if os.path.isabs(cwd):
381 if os.path.isabs(cwd):
382 f = f[len(back) :]
382 f = f[len(back) :]
383 else:
383 else:
384 f = cwd + b'/' + f
384 f = cwd + b'/' + f
385 return back + lfutil.standin(f)
385 return back + lfutil.standin(f)
386
386
387 else:
387 else:
388
388
389 def tostandin(f):
389 def tostandin(f):
390 if lfutil.isstandin(f):
390 if lfutil.isstandin(f):
391 return f
391 return f
392 return lfutil.standin(f)
392 return lfutil.standin(f)
393
393
394 pats.update(fixpats(f, tostandin) for f in p)
394 pats.update(fixpats(f, tostandin) for f in p)
395
395
396 for i in range(0, len(m._files)):
396 for i in range(0, len(m._files)):
397 # Don't add '.hglf' to m.files, since that is already covered by '.'
397 # Don't add '.hglf' to m.files, since that is already covered by '.'
398 if m._files[i] == b'.':
398 if m._files[i] == b'.':
399 continue
399 continue
400 standin = lfutil.standin(m._files[i])
400 standin = lfutil.standin(m._files[i])
401 # If the "standin" is a directory, append instead of replace to
401 # If the "standin" is a directory, append instead of replace to
402 # support naming a directory on the command line with only
402 # support naming a directory on the command line with only
403 # largefiles. The original directory is kept to support normal
403 # largefiles. The original directory is kept to support normal
404 # files.
404 # files.
405 if standin in ctx:
405 if standin in ctx:
406 m._files[i] = standin
406 m._files[i] = standin
407 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
407 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
408 m._files.append(standin)
408 m._files.append(standin)
409
409
410 m._fileset = set(m._files)
410 m._fileset = set(m._files)
411 m.always = lambda: False
411 m.always = lambda: False
412 origmatchfn = m.matchfn
412 origmatchfn = m.matchfn
413
413
414 def lfmatchfn(f):
414 def lfmatchfn(f):
415 lf = lfutil.splitstandin(f)
415 lf = lfutil.splitstandin(f)
416 if lf is not None and origmatchfn(lf):
416 if lf is not None and origmatchfn(lf):
417 return True
417 return True
418 r = origmatchfn(f)
418 r = origmatchfn(f)
419 return r
419 return r
420
420
421 m.matchfn = lfmatchfn
421 m.matchfn = lfmatchfn
422
422
423 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
423 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
424 return m, pats
424 return m, pats
425
425
426 # For hg log --patch, the match object is used in two different senses:
426 # For hg log --patch, the match object is used in two different senses:
427 # (1) to determine what revisions should be printed out, and
427 # (1) to determine what revisions should be printed out, and
428 # (2) to determine what files to print out diffs for.
428 # (2) to determine what files to print out diffs for.
429 # The magic matchandpats override should be used for case (1) but not for
429 # The magic matchandpats override should be used for case (1) but not for
430 # case (2).
430 # case (2).
431 oldmatchandpats = scmutil.matchandpats
431 oldmatchandpats = scmutil.matchandpats
432
432
433 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
433 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
434 wctx = repo[None]
434 wctx = repo[None]
435 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
435 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
436 return lambda ctx: match
436 return lambda ctx: match
437
437
438 wrappedmatchandpats = extensions.wrappedfunction(
438 wrappedmatchandpats = extensions.wrappedfunction(
439 scmutil, b'matchandpats', overridematchandpats
439 scmutil, b'matchandpats', overridematchandpats
440 )
440 )
441 wrappedmakefilematcher = extensions.wrappedfunction(
441 wrappedmakefilematcher = extensions.wrappedfunction(
442 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
442 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
443 )
443 )
444 with wrappedmatchandpats, wrappedmakefilematcher:
444 with wrappedmatchandpats, wrappedmakefilematcher:
445 return orig(ui, repo, *pats, **opts)
445 return orig(ui, repo, *pats, **opts)
446
446
447
447
448 @eh.wrapcommand(
448 @eh.wrapcommand(
449 b'verify',
449 b'verify',
450 opts=[
450 opts=[
451 (
451 (
452 b'',
452 b'',
453 b'large',
453 b'large',
454 None,
454 None,
455 _(b'verify that all largefiles in current revision exists'),
455 _(b'verify that all largefiles in current revision exists'),
456 ),
456 ),
457 (
457 (
458 b'',
458 b'',
459 b'lfa',
459 b'lfa',
460 None,
460 None,
461 _(b'verify largefiles in all revisions, not just current'),
461 _(b'verify largefiles in all revisions, not just current'),
462 ),
462 ),
463 (
463 (
464 b'',
464 b'',
465 b'lfc',
465 b'lfc',
466 None,
466 None,
467 _(b'verify local largefile contents, not just existence'),
467 _(b'verify local largefile contents, not just existence'),
468 ),
468 ),
469 ],
469 ],
470 )
470 )
471 def overrideverify(orig, ui, repo, *pats, **opts):
471 def overrideverify(orig, ui, repo, *pats, **opts):
472 large = opts.pop('large', False)
472 large = opts.pop('large', False)
473 all = opts.pop('lfa', False)
473 all = opts.pop('lfa', False)
474 contents = opts.pop('lfc', False)
474 contents = opts.pop('lfc', False)
475
475
476 result = orig(ui, repo, *pats, **opts)
476 result = orig(ui, repo, *pats, **opts)
477 if large or all or contents:
477 if large or all or contents:
478 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
478 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
479 return result
479 return result
480
480
481
481
482 @eh.wrapcommand(
482 @eh.wrapcommand(
483 b'debugstate',
483 b'debugstate',
484 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
484 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
485 )
485 )
486 def overridedebugstate(orig, ui, repo, *pats, **opts):
486 def overridedebugstate(orig, ui, repo, *pats, **opts):
487 large = opts.pop('large', False)
487 large = opts.pop('large', False)
488 if large:
488 if large:
489
489
490 class fakerepo(object):
490 class fakerepo(object):
491 dirstate = lfutil.openlfdirstate(ui, repo)
491 dirstate = lfutil.openlfdirstate(ui, repo)
492
492
493 orig(ui, fakerepo, *pats, **opts)
493 orig(ui, fakerepo, *pats, **opts)
494 else:
494 else:
495 orig(ui, repo, *pats, **opts)
495 orig(ui, repo, *pats, **opts)
496
496
497
497
498 # Before starting the manifest merge, merge.updates will call
498 # Before starting the manifest merge, merge.updates will call
499 # _checkunknownfile to check if there are any files in the merged-in
499 # _checkunknownfile to check if there are any files in the merged-in
500 # changeset that collide with unknown files in the working copy.
500 # changeset that collide with unknown files in the working copy.
501 #
501 #
502 # The largefiles are seen as unknown, so this prevents us from merging
502 # The largefiles are seen as unknown, so this prevents us from merging
503 # in a file 'foo' if we already have a largefile with the same name.
503 # in a file 'foo' if we already have a largefile with the same name.
504 #
504 #
505 # The overridden function filters the unknown files by removing any
505 # The overridden function filters the unknown files by removing any
506 # largefiles. This makes the merge proceed and we can then handle this
506 # largefiles. This makes the merge proceed and we can then handle this
507 # case further in the overridden calculateupdates function below.
507 # case further in the overridden calculateupdates function below.
508 @eh.wrapfunction(merge, b'_checkunknownfile')
508 @eh.wrapfunction(merge, b'_checkunknownfile')
509 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
509 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
510 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
510 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
511 return False
511 return False
512 return origfn(repo, wctx, mctx, f, f2)
512 return origfn(repo, wctx, mctx, f, f2)
513
513
514
514
515 # The manifest merge handles conflicts on the manifest level. We want
515 # The manifest merge handles conflicts on the manifest level. We want
516 # to handle changes in largefile-ness of files at this level too.
516 # to handle changes in largefile-ness of files at this level too.
517 #
517 #
518 # The strategy is to run the original calculateupdates and then process
518 # The strategy is to run the original calculateupdates and then process
519 # the action list it outputs. There are two cases we need to deal with:
519 # the action list it outputs. There are two cases we need to deal with:
520 #
520 #
521 # 1. Normal file in p1, largefile in p2. Here the largefile is
521 # 1. Normal file in p1, largefile in p2. Here the largefile is
522 # detected via its standin file, which will enter the working copy
522 # detected via its standin file, which will enter the working copy
523 # with a "get" action. It is not "merge" since the standin is all
523 # with a "get" action. It is not "merge" since the standin is all
524 # Mercurial is concerned with at this level -- the link to the
524 # Mercurial is concerned with at this level -- the link to the
525 # existing normal file is not relevant here.
525 # existing normal file is not relevant here.
526 #
526 #
527 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
527 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
528 # since the largefile will be present in the working copy and
528 # since the largefile will be present in the working copy and
529 # different from the normal file in p2. Mercurial therefore
529 # different from the normal file in p2. Mercurial therefore
530 # triggers a merge action.
530 # triggers a merge action.
531 #
531 #
532 # In both cases, we prompt the user and emit new actions to either
532 # In both cases, we prompt the user and emit new actions to either
533 # remove the standin (if the normal file was kept) or to remove the
533 # remove the standin (if the normal file was kept) or to remove the
534 # normal file and get the standin (if the largefile was kept). The
534 # normal file and get the standin (if the largefile was kept). The
535 # default prompt answer is to use the largefile version since it was
535 # default prompt answer is to use the largefile version since it was
536 # presumably changed on purpose.
536 # presumably changed on purpose.
537 #
537 #
538 # Finally, the merge.applyupdates function will then take care of
538 # Finally, the merge.applyupdates function will then take care of
539 # writing the files into the working copy and lfcommands.updatelfiles
539 # writing the files into the working copy and lfcommands.updatelfiles
540 # will update the largefiles.
540 # will update the largefiles.
541 @eh.wrapfunction(merge, b'calculateupdates')
541 @eh.wrapfunction(merge, b'calculateupdates')
542 def overridecalculateupdates(
542 def overridecalculateupdates(
543 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
543 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
544 ):
544 ):
545 overwrite = force and not branchmerge
545 overwrite = force and not branchmerge
546 mresult = origfn(
546 mresult = origfn(
547 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
547 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
548 )
548 )
549
549
550 if overwrite:
550 if overwrite:
551 return mresult
551 return mresult
552
552
553 # Convert to dictionary with filename as key and action as value.
553 # Convert to dictionary with filename as key and action as value.
554 lfiles = set()
554 lfiles = set()
555 for f in mresult.actions:
555 for f in mresult.actions:
556 splitstandin = lfutil.splitstandin(f)
556 splitstandin = lfutil.splitstandin(f)
557 if splitstandin is not None and splitstandin in p1:
557 if splitstandin is not None and splitstandin in p1:
558 lfiles.add(splitstandin)
558 lfiles.add(splitstandin)
559 elif lfutil.standin(f) in p1:
559 elif lfutil.standin(f) in p1:
560 lfiles.add(f)
560 lfiles.add(f)
561
561
562 for lfile in sorted(lfiles):
562 for lfile in sorted(lfiles):
563 standin = lfutil.standin(lfile)
563 standin = lfutil.standin(lfile)
564 (lm, largs, lmsg) = mresult.actions.get(lfile, (None, None, None))
564 (lm, largs, lmsg) = mresult.actions.get(lfile, (None, None, None))
565 (sm, sargs, smsg) = mresult.actions.get(standin, (None, None, None))
565 (sm, sargs, smsg) = mresult.actions.get(standin, (None, None, None))
566 if sm in (b'g', b'dc') and lm != b'r':
566 if sm in (b'g', b'dc') and lm != b'r':
567 if sm == b'dc':
567 if sm == b'dc':
568 f1, f2, fa, move, anc = sargs
568 f1, f2, fa, move, anc = sargs
569 sargs = (p2[f2].flags(), False)
569 sargs = (p2[f2].flags(), False)
570 # Case 1: normal file in the working copy, largefile in
570 # Case 1: normal file in the working copy, largefile in
571 # the second parent
571 # the second parent
572 usermsg = (
572 usermsg = (
573 _(
573 _(
574 b'remote turned local normal file %s into a largefile\n'
574 b'remote turned local normal file %s into a largefile\n'
575 b'use (l)argefile or keep (n)ormal file?'
575 b'use (l)argefile or keep (n)ormal file?'
576 b'$$ &Largefile $$ &Normal file'
576 b'$$ &Largefile $$ &Normal file'
577 )
577 )
578 % lfile
578 % lfile
579 )
579 )
580 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
580 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
581 mresult.actions[lfile] = (b'r', None, b'replaced by standin')
581 mresult.addfile(lfile, b'r', None, b'replaced by standin')
582 mresult.actions[standin] = (b'g', sargs, b'replaces standin')
582 mresult.addfile(standin, b'g', sargs, b'replaces standin')
583 else: # keep local normal file
583 else: # keep local normal file
584 mresult.actions[lfile] = (b'k', None, b'replaces standin')
584 mresult.addfile(lfile, b'k', None, b'replaces standin')
585 if branchmerge:
585 if branchmerge:
586 mresult.actions[standin] = (
587 b'k',
588 None,
589 b'replaced by non-standin',
590 )
586 mresult.addfile(
587 standin, b'k', None, b'replaced by non-standin',
588 )
591 else:
589 else:
592 mresult.actions[standin] = (
593 b'r',
594 None,
595 b'replaced by non-standin',
596 )
590 mresult.addfile(
591 standin, b'r', None, b'replaced by non-standin',
592 )
597 elif lm in (b'g', b'dc') and sm != b'r':
593 elif lm in (b'g', b'dc') and sm != b'r':
598 if lm == b'dc':
594 if lm == b'dc':
599 f1, f2, fa, move, anc = largs
595 f1, f2, fa, move, anc = largs
600 largs = (p2[f2].flags(), False)
596 largs = (p2[f2].flags(), False)
601 # Case 2: largefile in the working copy, normal file in
597 # Case 2: largefile in the working copy, normal file in
602 # the second parent
598 # the second parent
603 usermsg = (
599 usermsg = (
604 _(
600 _(
605 b'remote turned local largefile %s into a normal file\n'
601 b'remote turned local largefile %s into a normal file\n'
606 b'keep (l)argefile or use (n)ormal file?'
602 b'keep (l)argefile or use (n)ormal file?'
607 b'$$ &Largefile $$ &Normal file'
603 b'$$ &Largefile $$ &Normal file'
608 )
604 )
609 % lfile
605 % lfile
610 )
606 )
611 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
607 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
612 if branchmerge:
608 if branchmerge:
613 # largefile can be restored from standin safely
609 # largefile can be restored from standin safely
614 mresult.actions[lfile] = (
615 b'k',
616 None,
617 b'replaced by standin',
618 )
610 mresult.addfile(
611 lfile, b'k', None, b'replaced by standin',
612 )
619 mresult.actions[standin] = (b'k', None, b'replaces standin')
613 mresult.addfile(standin, b'k', None, b'replaces standin')
620 else:
614 else:
621 # "lfile" should be marked as "removed" without
615 # "lfile" should be marked as "removed" without
622 # removal of itself
616 # removal of itself
623 mresult.actions[lfile] = (
624 b'lfmr',
625 None,
626 b'forget non-standin largefile',
627 )
617 mresult.addfile(
618 lfile, b'lfmr', None, b'forget non-standin largefile',
619 )
628
620
629 # linear-merge should treat this largefile as 're-added'
621 # linear-merge should treat this largefile as 're-added'
630 mresult.actions[standin] = (b'a', None, b'keep standin')
622 mresult.addfile(standin, b'a', None, b'keep standin')
631 else: # pick remote normal file
623 else: # pick remote normal file
632 mresult.actions[lfile] = (b'g', largs, b'replaces standin')
624 mresult.addfile(lfile, b'g', largs, b'replaces standin')
633 mresult.actions[standin] = (
634 b'r',
635 None,
636 b'replaced by non-standin',
637 )
625 mresult.addfile(
626 standin, b'r', None, b'replaced by non-standin',
627 )
638
628
639 return mresult
629 return mresult
640
630
641
631
642 @eh.wrapfunction(mergestatemod, b'recordupdates')
632 @eh.wrapfunction(mergestatemod, b'recordupdates')
643 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
633 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
644 if b'lfmr' in actions:
634 if b'lfmr' in actions:
645 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
635 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
646 for lfile, args, msg in actions[b'lfmr']:
636 for lfile, args, msg in actions[b'lfmr']:
647 # this should be executed before 'orig', to execute 'remove'
637 # this should be executed before 'orig', to execute 'remove'
648 # before all other actions
638 # before all other actions
649 repo.dirstate.remove(lfile)
639 repo.dirstate.remove(lfile)
650 # make sure lfile doesn't get synclfdirstate'd as normal
640 # make sure lfile doesn't get synclfdirstate'd as normal
651 lfdirstate.add(lfile)
641 lfdirstate.add(lfile)
652 lfdirstate.write()
642 lfdirstate.write()
653
643
654 return orig(repo, actions, branchmerge, getfiledata)
644 return orig(repo, actions, branchmerge, getfiledata)
655
645
656
646
657 # Override filemerge to prompt the user about how they wish to merge
647 # Override filemerge to prompt the user about how they wish to merge
658 # largefiles. This will handle identical edits without prompting the user.
648 # largefiles. This will handle identical edits without prompting the user.
659 @eh.wrapfunction(filemerge, b'_filemerge')
649 @eh.wrapfunction(filemerge, b'_filemerge')
660 def overridefilemerge(
650 def overridefilemerge(
661 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
651 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
662 ):
652 ):
663 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
653 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
664 return origfn(
654 return origfn(
665 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
655 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
666 )
656 )
667
657
668 ahash = lfutil.readasstandin(fca).lower()
658 ahash = lfutil.readasstandin(fca).lower()
669 dhash = lfutil.readasstandin(fcd).lower()
659 dhash = lfutil.readasstandin(fcd).lower()
670 ohash = lfutil.readasstandin(fco).lower()
660 ohash = lfutil.readasstandin(fco).lower()
671 if (
661 if (
672 ohash != ahash
662 ohash != ahash
673 and ohash != dhash
663 and ohash != dhash
674 and (
664 and (
675 dhash == ahash
665 dhash == ahash
676 or repo.ui.promptchoice(
666 or repo.ui.promptchoice(
677 _(
667 _(
678 b'largefile %s has a merge conflict\nancestor was %s\n'
668 b'largefile %s has a merge conflict\nancestor was %s\n'
679 b'you can keep (l)ocal %s or take (o)ther %s.\n'
669 b'you can keep (l)ocal %s or take (o)ther %s.\n'
680 b'what do you want to do?'
670 b'what do you want to do?'
681 b'$$ &Local $$ &Other'
671 b'$$ &Local $$ &Other'
682 )
672 )
683 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
673 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
684 0,
674 0,
685 )
675 )
686 == 1
676 == 1
687 )
677 )
688 ):
678 ):
689 repo.wwrite(fcd.path(), fco.data(), fco.flags())
679 repo.wwrite(fcd.path(), fco.data(), fco.flags())
690 return True, 0, False
680 return True, 0, False
691
681
692
682
693 @eh.wrapfunction(copiesmod, b'pathcopies')
683 @eh.wrapfunction(copiesmod, b'pathcopies')
694 def copiespathcopies(orig, ctx1, ctx2, match=None):
684 def copiespathcopies(orig, ctx1, ctx2, match=None):
695 copies = orig(ctx1, ctx2, match=match)
685 copies = orig(ctx1, ctx2, match=match)
696 updated = {}
686 updated = {}
697
687
698 for k, v in pycompat.iteritems(copies):
688 for k, v in pycompat.iteritems(copies):
699 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
689 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
700
690
701 return updated
691 return updated
702
692
703
693
704 # Copy first changes the matchers to match standins instead of
694 # Copy first changes the matchers to match standins instead of
705 # largefiles. Then it overrides util.copyfile in that function it
695 # largefiles. Then it overrides util.copyfile in that function it
706 # checks if the destination largefile already exists. It also keeps a
696 # checks if the destination largefile already exists. It also keeps a
707 # list of copied files so that the largefiles can be copied and the
697 # list of copied files so that the largefiles can be copied and the
708 # dirstate updated.
698 # dirstate updated.
709 @eh.wrapfunction(cmdutil, b'copy')
699 @eh.wrapfunction(cmdutil, b'copy')
710 def overridecopy(orig, ui, repo, pats, opts, rename=False):
700 def overridecopy(orig, ui, repo, pats, opts, rename=False):
711 # doesn't remove largefile on rename
701 # doesn't remove largefile on rename
712 if len(pats) < 2:
702 if len(pats) < 2:
713 # this isn't legal, let the original function deal with it
703 # this isn't legal, let the original function deal with it
714 return orig(ui, repo, pats, opts, rename)
704 return orig(ui, repo, pats, opts, rename)
715
705
716 # This could copy both lfiles and normal files in one command,
706 # This could copy both lfiles and normal files in one command,
717 # but we don't want to do that. First replace their matcher to
707 # but we don't want to do that. First replace their matcher to
718 # only match normal files and run it, then replace it to just
708 # only match normal files and run it, then replace it to just
719 # match largefiles and run it again.
709 # match largefiles and run it again.
720 nonormalfiles = False
710 nonormalfiles = False
721 nolfiles = False
711 nolfiles = False
722 manifest = repo[None].manifest()
712 manifest = repo[None].manifest()
723
713
724 def normalfilesmatchfn(
714 def normalfilesmatchfn(
725 orig,
715 orig,
726 ctx,
716 ctx,
727 pats=(),
717 pats=(),
728 opts=None,
718 opts=None,
729 globbed=False,
719 globbed=False,
730 default=b'relpath',
720 default=b'relpath',
731 badfn=None,
721 badfn=None,
732 ):
722 ):
733 if opts is None:
723 if opts is None:
734 opts = {}
724 opts = {}
735 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
725 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
736 return composenormalfilematcher(match, manifest)
726 return composenormalfilematcher(match, manifest)
737
727
738 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
728 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
739 try:
729 try:
740 result = orig(ui, repo, pats, opts, rename)
730 result = orig(ui, repo, pats, opts, rename)
741 except error.Abort as e:
731 except error.Abort as e:
742 if pycompat.bytestr(e) != _(b'no files to copy'):
732 if pycompat.bytestr(e) != _(b'no files to copy'):
743 raise e
733 raise e
744 else:
734 else:
745 nonormalfiles = True
735 nonormalfiles = True
746 result = 0
736 result = 0
747
737
748 # The first rename can cause our current working directory to be removed.
738 # The first rename can cause our current working directory to be removed.
749 # In that case there is nothing left to copy/rename so just quit.
739 # In that case there is nothing left to copy/rename so just quit.
750 try:
740 try:
751 repo.getcwd()
741 repo.getcwd()
752 except OSError:
742 except OSError:
753 return result
743 return result
754
744
755 def makestandin(relpath):
745 def makestandin(relpath):
756 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
746 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
757 return repo.wvfs.join(lfutil.standin(path))
747 return repo.wvfs.join(lfutil.standin(path))
758
748
759 fullpats = scmutil.expandpats(pats)
749 fullpats = scmutil.expandpats(pats)
760 dest = fullpats[-1]
750 dest = fullpats[-1]
761
751
762 if os.path.isdir(dest):
752 if os.path.isdir(dest):
763 if not os.path.isdir(makestandin(dest)):
753 if not os.path.isdir(makestandin(dest)):
764 os.makedirs(makestandin(dest))
754 os.makedirs(makestandin(dest))
765
755
766 try:
756 try:
767 # When we call orig below it creates the standins but we don't add
757 # When we call orig below it creates the standins but we don't add
768 # them to the dir state until later so lock during that time.
758 # them to the dir state until later so lock during that time.
769 wlock = repo.wlock()
759 wlock = repo.wlock()
770
760
771 manifest = repo[None].manifest()
761 manifest = repo[None].manifest()
772
762
773 def overridematch(
763 def overridematch(
774 orig,
764 orig,
775 ctx,
765 ctx,
776 pats=(),
766 pats=(),
777 opts=None,
767 opts=None,
778 globbed=False,
768 globbed=False,
779 default=b'relpath',
769 default=b'relpath',
780 badfn=None,
770 badfn=None,
781 ):
771 ):
782 if opts is None:
772 if opts is None:
783 opts = {}
773 opts = {}
784 newpats = []
774 newpats = []
785 # The patterns were previously mangled to add the standin
775 # The patterns were previously mangled to add the standin
786 # directory; we need to remove that now
776 # directory; we need to remove that now
787 for pat in pats:
777 for pat in pats:
788 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
778 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
789 newpats.append(pat.replace(lfutil.shortname, b''))
779 newpats.append(pat.replace(lfutil.shortname, b''))
790 else:
780 else:
791 newpats.append(pat)
781 newpats.append(pat)
792 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
782 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
793 m = copy.copy(match)
783 m = copy.copy(match)
794 lfile = lambda f: lfutil.standin(f) in manifest
784 lfile = lambda f: lfutil.standin(f) in manifest
795 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
785 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
796 m._fileset = set(m._files)
786 m._fileset = set(m._files)
797 origmatchfn = m.matchfn
787 origmatchfn = m.matchfn
798
788
799 def matchfn(f):
789 def matchfn(f):
800 lfile = lfutil.splitstandin(f)
790 lfile = lfutil.splitstandin(f)
801 return (
791 return (
802 lfile is not None
792 lfile is not None
803 and (f in manifest)
793 and (f in manifest)
804 and origmatchfn(lfile)
794 and origmatchfn(lfile)
805 or None
795 or None
806 )
796 )
807
797
808 m.matchfn = matchfn
798 m.matchfn = matchfn
809 return m
799 return m
810
800
811 listpats = []
801 listpats = []
812 for pat in pats:
802 for pat in pats:
813 if matchmod.patkind(pat) is not None:
803 if matchmod.patkind(pat) is not None:
814 listpats.append(pat)
804 listpats.append(pat)
815 else:
805 else:
816 listpats.append(makestandin(pat))
806 listpats.append(makestandin(pat))
817
807
818 copiedfiles = []
808 copiedfiles = []
819
809
820 def overridecopyfile(orig, src, dest, *args, **kwargs):
810 def overridecopyfile(orig, src, dest, *args, **kwargs):
821 if lfutil.shortname in src and dest.startswith(
811 if lfutil.shortname in src and dest.startswith(
822 repo.wjoin(lfutil.shortname)
812 repo.wjoin(lfutil.shortname)
823 ):
813 ):
824 destlfile = dest.replace(lfutil.shortname, b'')
814 destlfile = dest.replace(lfutil.shortname, b'')
825 if not opts[b'force'] and os.path.exists(destlfile):
815 if not opts[b'force'] and os.path.exists(destlfile):
826 raise IOError(
816 raise IOError(
827 b'', _(b'destination largefile already exists')
817 b'', _(b'destination largefile already exists')
828 )
818 )
829 copiedfiles.append((src, dest))
819 copiedfiles.append((src, dest))
830 orig(src, dest, *args, **kwargs)
820 orig(src, dest, *args, **kwargs)
831
821
832 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
822 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
833 with extensions.wrappedfunction(scmutil, b'match', overridematch):
823 with extensions.wrappedfunction(scmutil, b'match', overridematch):
834 result += orig(ui, repo, listpats, opts, rename)
824 result += orig(ui, repo, listpats, opts, rename)
835
825
836 lfdirstate = lfutil.openlfdirstate(ui, repo)
826 lfdirstate = lfutil.openlfdirstate(ui, repo)
837 for (src, dest) in copiedfiles:
827 for (src, dest) in copiedfiles:
838 if lfutil.shortname in src and dest.startswith(
828 if lfutil.shortname in src and dest.startswith(
839 repo.wjoin(lfutil.shortname)
829 repo.wjoin(lfutil.shortname)
840 ):
830 ):
841 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
831 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
842 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
832 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
843 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
833 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
844 if not os.path.isdir(destlfiledir):
834 if not os.path.isdir(destlfiledir):
845 os.makedirs(destlfiledir)
835 os.makedirs(destlfiledir)
846 if rename:
836 if rename:
847 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
837 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
848
838
849 # The file is gone, but this deletes any empty parent
839 # The file is gone, but this deletes any empty parent
850 # directories as a side-effect.
840 # directories as a side-effect.
851 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
841 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
852 lfdirstate.remove(srclfile)
842 lfdirstate.remove(srclfile)
853 else:
843 else:
854 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
844 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
855
845
856 lfdirstate.add(destlfile)
846 lfdirstate.add(destlfile)
857 lfdirstate.write()
847 lfdirstate.write()
858 except error.Abort as e:
848 except error.Abort as e:
859 if pycompat.bytestr(e) != _(b'no files to copy'):
849 if pycompat.bytestr(e) != _(b'no files to copy'):
860 raise e
850 raise e
861 else:
851 else:
862 nolfiles = True
852 nolfiles = True
863 finally:
853 finally:
864 wlock.release()
854 wlock.release()
865
855
866 if nolfiles and nonormalfiles:
856 if nolfiles and nonormalfiles:
867 raise error.Abort(_(b'no files to copy'))
857 raise error.Abort(_(b'no files to copy'))
868
858
869 return result
859 return result
870
860
871
861
872 # When the user calls revert, we have to be careful to not revert any
862 # When the user calls revert, we have to be careful to not revert any
873 # changes to other largefiles accidentally. This means we have to keep
863 # changes to other largefiles accidentally. This means we have to keep
874 # track of the largefiles that are being reverted so we only pull down
864 # track of the largefiles that are being reverted so we only pull down
875 # the necessary largefiles.
865 # the necessary largefiles.
876 #
866 #
877 # Standins are only updated (to match the hash of largefiles) before
867 # Standins are only updated (to match the hash of largefiles) before
878 # commits. Update the standins then run the original revert, changing
868 # commits. Update the standins then run the original revert, changing
879 # the matcher to hit standins instead of largefiles. Based on the
869 # the matcher to hit standins instead of largefiles. Based on the
880 # resulting standins update the largefiles.
870 # resulting standins update the largefiles.
881 @eh.wrapfunction(cmdutil, b'revert')
871 @eh.wrapfunction(cmdutil, b'revert')
882 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
872 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
883 # Because we put the standins in a bad state (by updating them)
873 # Because we put the standins in a bad state (by updating them)
884 # and then return them to a correct state we need to lock to
874 # and then return them to a correct state we need to lock to
885 # prevent others from changing them in their incorrect state.
875 # prevent others from changing them in their incorrect state.
886 with repo.wlock():
876 with repo.wlock():
887 lfdirstate = lfutil.openlfdirstate(ui, repo)
877 lfdirstate = lfutil.openlfdirstate(ui, repo)
888 s = lfutil.lfdirstatestatus(lfdirstate, repo)
878 s = lfutil.lfdirstatestatus(lfdirstate, repo)
889 lfdirstate.write()
879 lfdirstate.write()
890 for lfile in s.modified:
880 for lfile in s.modified:
891 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
881 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
892 for lfile in s.deleted:
882 for lfile in s.deleted:
893 fstandin = lfutil.standin(lfile)
883 fstandin = lfutil.standin(lfile)
894 if repo.wvfs.exists(fstandin):
884 if repo.wvfs.exists(fstandin):
895 repo.wvfs.unlink(fstandin)
885 repo.wvfs.unlink(fstandin)
896
886
897 oldstandins = lfutil.getstandinsstate(repo)
887 oldstandins = lfutil.getstandinsstate(repo)
898
888
899 def overridematch(
889 def overridematch(
900 orig,
890 orig,
901 mctx,
891 mctx,
902 pats=(),
892 pats=(),
903 opts=None,
893 opts=None,
904 globbed=False,
894 globbed=False,
905 default=b'relpath',
895 default=b'relpath',
906 badfn=None,
896 badfn=None,
907 ):
897 ):
908 if opts is None:
898 if opts is None:
909 opts = {}
899 opts = {}
910 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
900 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
911 m = copy.copy(match)
901 m = copy.copy(match)
912
902
913 # revert supports recursing into subrepos, and though largefiles
903 # revert supports recursing into subrepos, and though largefiles
914 # currently doesn't work correctly in that case, this match is
904 # currently doesn't work correctly in that case, this match is
915 # called, so the lfdirstate above may not be the correct one for
905 # called, so the lfdirstate above may not be the correct one for
916 # this invocation of match.
906 # this invocation of match.
917 lfdirstate = lfutil.openlfdirstate(
907 lfdirstate = lfutil.openlfdirstate(
918 mctx.repo().ui, mctx.repo(), False
908 mctx.repo().ui, mctx.repo(), False
919 )
909 )
920
910
921 wctx = repo[None]
911 wctx = repo[None]
922 matchfiles = []
912 matchfiles = []
923 for f in m._files:
913 for f in m._files:
924 standin = lfutil.standin(f)
914 standin = lfutil.standin(f)
925 if standin in ctx or standin in mctx:
915 if standin in ctx or standin in mctx:
926 matchfiles.append(standin)
916 matchfiles.append(standin)
927 elif standin in wctx or lfdirstate[f] == b'r':
917 elif standin in wctx or lfdirstate[f] == b'r':
928 continue
918 continue
929 else:
919 else:
930 matchfiles.append(f)
920 matchfiles.append(f)
931 m._files = matchfiles
921 m._files = matchfiles
932 m._fileset = set(m._files)
922 m._fileset = set(m._files)
933 origmatchfn = m.matchfn
923 origmatchfn = m.matchfn
934
924
935 def matchfn(f):
925 def matchfn(f):
936 lfile = lfutil.splitstandin(f)
926 lfile = lfutil.splitstandin(f)
937 if lfile is not None:
927 if lfile is not None:
938 return origmatchfn(lfile) and (f in ctx or f in mctx)
928 return origmatchfn(lfile) and (f in ctx or f in mctx)
939 return origmatchfn(f)
929 return origmatchfn(f)
940
930
941 m.matchfn = matchfn
931 m.matchfn = matchfn
942 return m
932 return m
943
933
944 with extensions.wrappedfunction(scmutil, b'match', overridematch):
934 with extensions.wrappedfunction(scmutil, b'match', overridematch):
945 orig(ui, repo, ctx, parents, *pats, **opts)
935 orig(ui, repo, ctx, parents, *pats, **opts)
946
936
947 newstandins = lfutil.getstandinsstate(repo)
937 newstandins = lfutil.getstandinsstate(repo)
948 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
938 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
949 # lfdirstate should be 'normallookup'-ed for updated files,
939 # lfdirstate should be 'normallookup'-ed for updated files,
950 # because reverting doesn't touch the dirstate for 'normal' files
940 # because reverting doesn't touch the dirstate for 'normal' files
951 # when the target revision is explicitly specified: in that case,
941 # when the target revision is explicitly specified: in that case,
952 # an 'n' state and a valid timestamp in the dirstate don't guarantee
942 # an 'n' state and a valid timestamp in the dirstate don't guarantee
953 # that the target (standin) file is clean.
943 # that the target (standin) file is clean.
954 lfcommands.updatelfiles(
944 lfcommands.updatelfiles(
955 ui, repo, filelist, printmessage=False, normallookup=True
945 ui, repo, filelist, printmessage=False, normallookup=True
956 )
946 )
957
947
958
948
959 # after pulling changesets, we need to take some extra care to get
949 # after pulling changesets, we need to take some extra care to get
960 # largefiles updated remotely
950 # largefiles updated remotely
961 @eh.wrapcommand(
951 @eh.wrapcommand(
962 b'pull',
952 b'pull',
963 opts=[
953 opts=[
964 (
954 (
965 b'',
955 b'',
966 b'all-largefiles',
956 b'all-largefiles',
967 None,
957 None,
968 _(b'download all pulled versions of largefiles (DEPRECATED)'),
958 _(b'download all pulled versions of largefiles (DEPRECATED)'),
969 ),
959 ),
970 (
960 (
971 b'',
961 b'',
972 b'lfrev',
962 b'lfrev',
973 [],
963 [],
974 _(b'download largefiles for these revisions'),
964 _(b'download largefiles for these revisions'),
975 _(b'REV'),
965 _(b'REV'),
976 ),
966 ),
977 ],
967 ],
978 )
968 )
979 def overridepull(orig, ui, repo, source=None, **opts):
969 def overridepull(orig, ui, repo, source=None, **opts):
980 revsprepull = len(repo)
970 revsprepull = len(repo)
981 if not source:
971 if not source:
982 source = b'default'
972 source = b'default'
983 repo.lfpullsource = source
973 repo.lfpullsource = source
984 result = orig(ui, repo, source, **opts)
974 result = orig(ui, repo, source, **opts)
985 revspostpull = len(repo)
975 revspostpull = len(repo)
986 lfrevs = opts.get('lfrev', [])
976 lfrevs = opts.get('lfrev', [])
987 if opts.get('all_largefiles'):
977 if opts.get('all_largefiles'):
988 lfrevs.append(b'pulled()')
978 lfrevs.append(b'pulled()')
989 if lfrevs and revspostpull > revsprepull:
979 if lfrevs and revspostpull > revsprepull:
990 numcached = 0
980 numcached = 0
991 repo.firstpulled = revsprepull # for pulled() revset expression
981 repo.firstpulled = revsprepull # for pulled() revset expression
992 try:
982 try:
993 for rev in scmutil.revrange(repo, lfrevs):
983 for rev in scmutil.revrange(repo, lfrevs):
994 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
984 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
995 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
985 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
996 numcached += len(cached)
986 numcached += len(cached)
997 finally:
987 finally:
998 del repo.firstpulled
988 del repo.firstpulled
999 ui.status(_(b"%d largefiles cached\n") % numcached)
989 ui.status(_(b"%d largefiles cached\n") % numcached)
1000 return result
990 return result
1001
991
1002
992
1003 @eh.wrapcommand(
993 @eh.wrapcommand(
1004 b'push',
994 b'push',
1005 opts=[
995 opts=[
1006 (
996 (
1007 b'',
997 b'',
1008 b'lfrev',
998 b'lfrev',
1009 [],
999 [],
1010 _(b'upload largefiles for these revisions'),
1000 _(b'upload largefiles for these revisions'),
1011 _(b'REV'),
1001 _(b'REV'),
1012 )
1002 )
1013 ],
1003 ],
1014 )
1004 )
1015 def overridepush(orig, ui, repo, *args, **kwargs):
1005 def overridepush(orig, ui, repo, *args, **kwargs):
1016 """Override push command and store --lfrev parameters in opargs"""
1006 """Override push command and store --lfrev parameters in opargs"""
1017 lfrevs = kwargs.pop('lfrev', None)
1007 lfrevs = kwargs.pop('lfrev', None)
1018 if lfrevs:
1008 if lfrevs:
1019 opargs = kwargs.setdefault('opargs', {})
1009 opargs = kwargs.setdefault('opargs', {})
1020 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1010 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1021 return orig(ui, repo, *args, **kwargs)
1011 return orig(ui, repo, *args, **kwargs)
1022
1012
1023
1013
1024 @eh.wrapfunction(exchange, b'pushoperation')
1014 @eh.wrapfunction(exchange, b'pushoperation')
1025 def exchangepushoperation(orig, *args, **kwargs):
1015 def exchangepushoperation(orig, *args, **kwargs):
1026 """Override pushoperation constructor and store lfrevs parameter"""
1016 """Override pushoperation constructor and store lfrevs parameter"""
1027 lfrevs = kwargs.pop('lfrevs', None)
1017 lfrevs = kwargs.pop('lfrevs', None)
1028 pushop = orig(*args, **kwargs)
1018 pushop = orig(*args, **kwargs)
1029 pushop.lfrevs = lfrevs
1019 pushop.lfrevs = lfrevs
1030 return pushop
1020 return pushop
1031
1021
1032
1022
1033 @eh.revsetpredicate(b'pulled()')
1023 @eh.revsetpredicate(b'pulled()')
1034 def pulledrevsetsymbol(repo, subset, x):
1024 def pulledrevsetsymbol(repo, subset, x):
1035 """Changesets that have just been pulled.
1025 """Changesets that have just been pulled.
1036
1026
1037 Only available with largefiles from pull --lfrev expressions.
1027 Only available with largefiles from pull --lfrev expressions.
1038
1028
1039 .. container:: verbose
1029 .. container:: verbose
1040
1030
1041 Some examples:
1031 Some examples:
1042
1032
1043 - pull largefiles for all new changesets::
1033 - pull largefiles for all new changesets::
1044
1034
1045 hg pull --lfrev "pulled()"
1035 hg pull --lfrev "pulled()"
1046
1036
1047 - pull largefiles for all new branch heads::
1037 - pull largefiles for all new branch heads::
1048
1038
1049 hg pull --lfrev "head(pulled()) and not closed()"
1039 hg pull --lfrev "head(pulled()) and not closed()"
1050
1040
1051 """
1041 """
1052
1042
1053 try:
1043 try:
1054 firstpulled = repo.firstpulled
1044 firstpulled = repo.firstpulled
1055 except AttributeError:
1045 except AttributeError:
1056 raise error.Abort(_(b"pulled() only available in --lfrev"))
1046 raise error.Abort(_(b"pulled() only available in --lfrev"))
1057 return smartset.baseset([r for r in subset if r >= firstpulled])
1047 return smartset.baseset([r for r in subset if r >= firstpulled])
1058
1048
1059
1049
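The predicate above reduces to simple revision-number arithmetic: every changeset added by the pull is numbered at or above the pre-pull repository length recorded in repo.firstpulled. A minimal stand-alone sketch of that idea (not part of the changeset; plain Python, no Mercurial APIs):

def pulled_revs(subset, firstpulled):
    # keep only revisions created by the current pull, i.e. those whose
    # local revision number is at least the pre-pull repository length
    return [r for r in subset if r >= firstpulled]

# a repo that had 5 revisions before the pull and 8 afterwards
assert pulled_revs(range(8), 5) == [5, 6, 7]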
1060 @eh.wrapcommand(
1050 @eh.wrapcommand(
1061 b'clone',
1051 b'clone',
1062 opts=[
1052 opts=[
1063 (
1053 (
1064 b'',
1054 b'',
1065 b'all-largefiles',
1055 b'all-largefiles',
1066 None,
1056 None,
1067 _(b'download all versions of all largefiles'),
1057 _(b'download all versions of all largefiles'),
1068 )
1058 )
1069 ],
1059 ],
1070 )
1060 )
1071 def overrideclone(orig, ui, source, dest=None, **opts):
1061 def overrideclone(orig, ui, source, dest=None, **opts):
1072 d = dest
1062 d = dest
1073 if d is None:
1063 if d is None:
1074 d = hg.defaultdest(source)
1064 d = hg.defaultdest(source)
1075 if opts.get('all_largefiles') and not hg.islocal(d):
1065 if opts.get('all_largefiles') and not hg.islocal(d):
1076 raise error.Abort(
1066 raise error.Abort(
1077 _(b'--all-largefiles is incompatible with non-local destination %s')
1067 _(b'--all-largefiles is incompatible with non-local destination %s')
1078 % d
1068 % d
1079 )
1069 )
1080
1070
1081 return orig(ui, source, dest, **opts)
1071 return orig(ui, source, dest, **opts)
1082
1072
1083
1073
1084 @eh.wrapfunction(hg, b'clone')
1074 @eh.wrapfunction(hg, b'clone')
1085 def hgclone(orig, ui, opts, *args, **kwargs):
1075 def hgclone(orig, ui, opts, *args, **kwargs):
1086 result = orig(ui, opts, *args, **kwargs)
1076 result = orig(ui, opts, *args, **kwargs)
1087
1077
1088 if result is not None:
1078 if result is not None:
1089 sourcerepo, destrepo = result
1079 sourcerepo, destrepo = result
1090 repo = destrepo.local()
1080 repo = destrepo.local()
1091
1081
1092 # When cloning to a remote repo (like through SSH), no repo is available
1082 # When cloning to a remote repo (like through SSH), no repo is available
1093 # from the peer. Therefore the largefiles can't be downloaded and the
1083 # from the peer. Therefore the largefiles can't be downloaded and the
1094 # hgrc can't be updated.
1084 # hgrc can't be updated.
1095 if not repo:
1085 if not repo:
1096 return result
1086 return result
1097
1087
1098 # Caching is implicitly limited to the 'rev' option, since the dest repo
1088 # Caching is implicitly limited to the 'rev' option, since the dest repo
1099 # was truncated at that point. The user may expect a download count with
1089 # was truncated at that point. The user may expect a download count with
1100 # this option, so attempt it whether or not this is a largefile repo.
1090 # this option, so attempt it whether or not this is a largefile repo.
1101 if opts.get(b'all_largefiles'):
1091 if opts.get(b'all_largefiles'):
1102 success, missing = lfcommands.downloadlfiles(ui, repo, None)
1092 success, missing = lfcommands.downloadlfiles(ui, repo, None)
1103
1093
1104 if missing != 0:
1094 if missing != 0:
1105 return None
1095 return None
1106
1096
1107 return result
1097 return result
1108
1098
1109
1099
1110 @eh.wrapcommand(b'rebase', extension=b'rebase')
1100 @eh.wrapcommand(b'rebase', extension=b'rebase')
1111 def overriderebase(orig, ui, repo, **opts):
1101 def overriderebase(orig, ui, repo, **opts):
1112 if not util.safehasattr(repo, b'_largefilesenabled'):
1102 if not util.safehasattr(repo, b'_largefilesenabled'):
1113 return orig(ui, repo, **opts)
1103 return orig(ui, repo, **opts)
1114
1104
1115 resuming = opts.get('continue')
1105 resuming = opts.get('continue')
1116 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1106 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1117 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1107 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1118 try:
1108 try:
1119 return orig(ui, repo, **opts)
1109 return orig(ui, repo, **opts)
1120 finally:
1110 finally:
1121 repo._lfstatuswriters.pop()
1111 repo._lfstatuswriters.pop()
1122 repo._lfcommithooks.pop()
1112 repo._lfcommithooks.pop()
1123
1113
1124
1114
1125 @eh.wrapcommand(b'archive')
1115 @eh.wrapcommand(b'archive')
1126 def overridearchivecmd(orig, ui, repo, dest, **opts):
1116 def overridearchivecmd(orig, ui, repo, dest, **opts):
1127 with lfstatus(repo.unfiltered()):
1117 with lfstatus(repo.unfiltered()):
1128 return orig(ui, repo.unfiltered(), dest, **opts)
1118 return orig(ui, repo.unfiltered(), dest, **opts)
1129
1119
1130
1120
1131 @eh.wrapfunction(webcommands, b'archive')
1121 @eh.wrapfunction(webcommands, b'archive')
1132 def hgwebarchive(orig, web):
1122 def hgwebarchive(orig, web):
1133 with lfstatus(web.repo):
1123 with lfstatus(web.repo):
1134 return orig(web)
1124 return orig(web)
1135
1125
1136
1126
1137 @eh.wrapfunction(archival, b'archive')
1127 @eh.wrapfunction(archival, b'archive')
1138 def overridearchive(
1128 def overridearchive(
1139 orig,
1129 orig,
1140 repo,
1130 repo,
1141 dest,
1131 dest,
1142 node,
1132 node,
1143 kind,
1133 kind,
1144 decode=True,
1134 decode=True,
1145 match=None,
1135 match=None,
1146 prefix=b'',
1136 prefix=b'',
1147 mtime=None,
1137 mtime=None,
1148 subrepos=None,
1138 subrepos=None,
1149 ):
1139 ):
1150 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1140 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1151 # unfiltered repo's attr, so check that as well.
1141 # unfiltered repo's attr, so check that as well.
1152 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1142 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1153 return orig(
1143 return orig(
1154 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1144 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1155 )
1145 )
1156
1146
1157 # No need to lock because we are only reading history and
1147 # No need to lock because we are only reading history and
1158 # largefile caches, neither of which are modified.
1148 # largefile caches, neither of which are modified.
1159 if node is not None:
1149 if node is not None:
1160 lfcommands.cachelfiles(repo.ui, repo, node)
1150 lfcommands.cachelfiles(repo.ui, repo, node)
1161
1151
1162 if kind not in archival.archivers:
1152 if kind not in archival.archivers:
1163 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1153 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1164
1154
1165 ctx = repo[node]
1155 ctx = repo[node]
1166
1156
1167 if kind == b'files':
1157 if kind == b'files':
1168 if prefix:
1158 if prefix:
1169 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1159 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1170 else:
1160 else:
1171 prefix = archival.tidyprefix(dest, kind, prefix)
1161 prefix = archival.tidyprefix(dest, kind, prefix)
1172
1162
1173 def write(name, mode, islink, getdata):
1163 def write(name, mode, islink, getdata):
1174 if match and not match(name):
1164 if match and not match(name):
1175 return
1165 return
1176 data = getdata()
1166 data = getdata()
1177 if decode:
1167 if decode:
1178 data = repo.wwritedata(name, data)
1168 data = repo.wwritedata(name, data)
1179 archiver.addfile(prefix + name, mode, islink, data)
1169 archiver.addfile(prefix + name, mode, islink, data)
1180
1170
1181 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1171 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1182
1172
1183 if repo.ui.configbool(b"ui", b"archivemeta"):
1173 if repo.ui.configbool(b"ui", b"archivemeta"):
1184 write(
1174 write(
1185 b'.hg_archival.txt',
1175 b'.hg_archival.txt',
1186 0o644,
1176 0o644,
1187 False,
1177 False,
1188 lambda: archival.buildmetadata(ctx),
1178 lambda: archival.buildmetadata(ctx),
1189 )
1179 )
1190
1180
1191 for f in ctx:
1181 for f in ctx:
1192 ff = ctx.flags(f)
1182 ff = ctx.flags(f)
1193 getdata = ctx[f].data
1183 getdata = ctx[f].data
1194 lfile = lfutil.splitstandin(f)
1184 lfile = lfutil.splitstandin(f)
1195 if lfile is not None:
1185 if lfile is not None:
1196 if node is not None:
1186 if node is not None:
1197 path = lfutil.findfile(repo, getdata().strip())
1187 path = lfutil.findfile(repo, getdata().strip())
1198
1188
1199 if path is None:
1189 if path is None:
1200 raise error.Abort(
1190 raise error.Abort(
1201 _(
1191 _(
1202 b'largefile %s not found in repo store or system cache'
1192 b'largefile %s not found in repo store or system cache'
1203 )
1193 )
1204 % lfile
1194 % lfile
1205 )
1195 )
1206 else:
1196 else:
1207 path = lfile
1197 path = lfile
1208
1198
1209 f = lfile
1199 f = lfile
1210
1200
1211 getdata = lambda: util.readfile(path)
1201 getdata = lambda: util.readfile(path)
1212 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1202 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1213
1203
1214 if subrepos:
1204 if subrepos:
1215 for subpath in sorted(ctx.substate):
1205 for subpath in sorted(ctx.substate):
1216 sub = ctx.workingsub(subpath)
1206 sub = ctx.workingsub(subpath)
1217 submatch = matchmod.subdirmatcher(subpath, match)
1207 submatch = matchmod.subdirmatcher(subpath, match)
1218 subprefix = prefix + subpath + b'/'
1208 subprefix = prefix + subpath + b'/'
1219
1209
1220 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1210 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1221 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1211 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1222 # allow only hgsubrepos to set this, instead of the current scheme
1212 # allow only hgsubrepos to set this, instead of the current scheme
1223 # where the parent sets this for the child.
1213 # where the parent sets this for the child.
1224 with (
1214 with (
1225 util.safehasattr(sub, '_repo')
1215 util.safehasattr(sub, '_repo')
1226 and lfstatus(sub._repo)
1216 and lfstatus(sub._repo)
1227 or util.nullcontextmanager()
1217 or util.nullcontextmanager()
1228 ):
1218 ):
1229 sub.archive(archiver, subprefix, submatch)
1219 sub.archive(archiver, subprefix, submatch)
1230
1220
1231 archiver.done()
1221 archiver.done()
1232
1222
1233
1223
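For the archive override above, the key step is mapping each standin back to the real largefile content before it is written to the archive. A rough stand-alone sketch of that lookup (the helper names and the cache-lookup callable are hypothetical, not the extension's API; only the '.hglf/' standin directory is taken from the extension):

def split_standin(name, standin_dir='.hglf/'):
    # standins live under .hglf/<original path>; return None for normal files
    return name[len(standin_dir):] if name.startswith(standin_dir) else None

def archive_entry(name, read_data, find_cached):
    """Return (archive name, data) for one manifest entry.

    read_data() yields the committed content (just the hash for a standin),
    find_cached(hash) returns a path in the store/cache or None.
    """
    lfile = split_standin(name)
    if lfile is None:
        return name, read_data()          # normal file: archive as-is
    path = find_cached(read_data().strip())
    if path is None:
        raise LookupError('largefile %s not found in store or cache' % lfile)
    with open(path, 'rb') as fp:
        return lfile, fp.read()           # archive under the largefile name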
1234 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1224 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1235 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1225 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1236 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1226 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1237 if not lfenabled or not repo._repo.lfstatus:
1227 if not lfenabled or not repo._repo.lfstatus:
1238 return orig(repo, archiver, prefix, match, decode)
1228 return orig(repo, archiver, prefix, match, decode)
1239
1229
1240 repo._get(repo._state + (b'hg',))
1230 repo._get(repo._state + (b'hg',))
1241 rev = repo._state[1]
1231 rev = repo._state[1]
1242 ctx = repo._repo[rev]
1232 ctx = repo._repo[rev]
1243
1233
1244 if ctx.node() is not None:
1234 if ctx.node() is not None:
1245 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1235 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1246
1236
1247 def write(name, mode, islink, getdata):
1237 def write(name, mode, islink, getdata):
1248 # At this point, the standin has been replaced with the largefile name,
1238 # At this point, the standin has been replaced with the largefile name,
1249 # so the normal matcher works here without the lfutil variants.
1239 # so the normal matcher works here without the lfutil variants.
1250 if match and not match(f):
1240 if match and not match(f):
1251 return
1241 return
1252 data = getdata()
1242 data = getdata()
1253 if decode:
1243 if decode:
1254 data = repo._repo.wwritedata(name, data)
1244 data = repo._repo.wwritedata(name, data)
1255
1245
1256 archiver.addfile(prefix + name, mode, islink, data)
1246 archiver.addfile(prefix + name, mode, islink, data)
1257
1247
1258 for f in ctx:
1248 for f in ctx:
1259 ff = ctx.flags(f)
1249 ff = ctx.flags(f)
1260 getdata = ctx[f].data
1250 getdata = ctx[f].data
1261 lfile = lfutil.splitstandin(f)
1251 lfile = lfutil.splitstandin(f)
1262 if lfile is not None:
1252 if lfile is not None:
1263 if ctx.node() is not None:
1253 if ctx.node() is not None:
1264 path = lfutil.findfile(repo._repo, getdata().strip())
1254 path = lfutil.findfile(repo._repo, getdata().strip())
1265
1255
1266 if path is None:
1256 if path is None:
1267 raise error.Abort(
1257 raise error.Abort(
1268 _(
1258 _(
1269 b'largefile %s not found in repo store or system cache'
1259 b'largefile %s not found in repo store or system cache'
1270 )
1260 )
1271 % lfile
1261 % lfile
1272 )
1262 )
1273 else:
1263 else:
1274 path = lfile
1264 path = lfile
1275
1265
1276 f = lfile
1266 f = lfile
1277
1267
1278 getdata = lambda: util.readfile(os.path.join(prefix, path))
1268 getdata = lambda: util.readfile(os.path.join(prefix, path))
1279
1269
1280 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1270 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1281
1271
1282 for subpath in sorted(ctx.substate):
1272 for subpath in sorted(ctx.substate):
1283 sub = ctx.workingsub(subpath)
1273 sub = ctx.workingsub(subpath)
1284 submatch = matchmod.subdirmatcher(subpath, match)
1274 submatch = matchmod.subdirmatcher(subpath, match)
1285 subprefix = prefix + subpath + b'/'
1275 subprefix = prefix + subpath + b'/'
1286 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1276 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1287 # infer and possibly set lfstatus at the top of this function. That
1277 # infer and possibly set lfstatus at the top of this function. That
1288 # would allow only hgsubrepos to set this, instead of the current scheme
1278 # would allow only hgsubrepos to set this, instead of the current scheme
1289 # where the parent sets this for the child.
1279 # where the parent sets this for the child.
1290 with (
1280 with (
1291 util.safehasattr(sub, '_repo')
1281 util.safehasattr(sub, '_repo')
1292 and lfstatus(sub._repo)
1282 and lfstatus(sub._repo)
1293 or util.nullcontextmanager()
1283 or util.nullcontextmanager()
1294 ):
1284 ):
1295 sub.archive(archiver, subprefix, submatch, decode)
1285 sub.archive(archiver, subprefix, submatch, decode)
1296
1286
1297
1287
1298 # If a largefile is modified, the change is not reflected in its
1288 # If a largefile is modified, the change is not reflected in its
1299 # standin until a commit. cmdutil.bailifchanged() raises an exception
1289 # standin until a commit. cmdutil.bailifchanged() raises an exception
1300 # if the repo has uncommitted changes. Wrap it to also check if
1290 # if the repo has uncommitted changes. Wrap it to also check if
1301 # largefiles were changed. This is used by bisect, backout and fetch.
1291 # largefiles were changed. This is used by bisect, backout and fetch.
1302 @eh.wrapfunction(cmdutil, b'bailifchanged')
1292 @eh.wrapfunction(cmdutil, b'bailifchanged')
1303 def overridebailifchanged(orig, repo, *args, **kwargs):
1293 def overridebailifchanged(orig, repo, *args, **kwargs):
1304 orig(repo, *args, **kwargs)
1294 orig(repo, *args, **kwargs)
1305 with lfstatus(repo):
1295 with lfstatus(repo):
1306 s = repo.status()
1296 s = repo.status()
1307 if s.modified or s.added or s.removed or s.deleted:
1297 if s.modified or s.added or s.removed or s.deleted:
1308 raise error.Abort(_(b'uncommitted changes'))
1298 raise error.Abort(_(b'uncommitted changes'))
1309
1299
1310
1300
1311 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1301 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1312 def postcommitstatus(orig, repo, *args, **kwargs):
1302 def postcommitstatus(orig, repo, *args, **kwargs):
1313 with lfstatus(repo):
1303 with lfstatus(repo):
1314 return orig(repo, *args, **kwargs)
1304 return orig(repo, *args, **kwargs)
1315
1305
1316
1306
1317 @eh.wrapfunction(cmdutil, b'forget')
1307 @eh.wrapfunction(cmdutil, b'forget')
1318 def cmdutilforget(
1308 def cmdutilforget(
1319 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1309 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1320 ):
1310 ):
1321 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1311 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1322 bad, forgot = orig(
1312 bad, forgot = orig(
1323 ui,
1313 ui,
1324 repo,
1314 repo,
1325 normalmatcher,
1315 normalmatcher,
1326 prefix,
1316 prefix,
1327 uipathfn,
1317 uipathfn,
1328 explicitonly,
1318 explicitonly,
1329 dryrun,
1319 dryrun,
1330 interactive,
1320 interactive,
1331 )
1321 )
1332 m = composelargefilematcher(match, repo[None].manifest())
1322 m = composelargefilematcher(match, repo[None].manifest())
1333
1323
1334 with lfstatus(repo):
1324 with lfstatus(repo):
1335 s = repo.status(match=m, clean=True)
1325 s = repo.status(match=m, clean=True)
1336 manifest = repo[None].manifest()
1326 manifest = repo[None].manifest()
1337 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1327 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1338 forget = [f for f in forget if lfutil.standin(f) in manifest]
1328 forget = [f for f in forget if lfutil.standin(f) in manifest]
1339
1329
1340 for f in forget:
1330 for f in forget:
1341 fstandin = lfutil.standin(f)
1331 fstandin = lfutil.standin(f)
1342 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1332 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1343 ui.warn(
1333 ui.warn(
1344 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1334 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1345 )
1335 )
1346 bad.append(f)
1336 bad.append(f)
1347
1337
1348 for f in forget:
1338 for f in forget:
1349 if ui.verbose or not m.exact(f):
1339 if ui.verbose or not m.exact(f):
1350 ui.status(_(b'removing %s\n') % uipathfn(f))
1340 ui.status(_(b'removing %s\n') % uipathfn(f))
1351
1341
1352 # Need to lock because standin files are deleted then removed from the
1342 # Need to lock because standin files are deleted then removed from the
1353 # repository and we could race in-between.
1343 # repository and we could race in-between.
1354 with repo.wlock():
1344 with repo.wlock():
1355 lfdirstate = lfutil.openlfdirstate(ui, repo)
1345 lfdirstate = lfutil.openlfdirstate(ui, repo)
1356 for f in forget:
1346 for f in forget:
1357 if lfdirstate[f] == b'a':
1347 if lfdirstate[f] == b'a':
1358 lfdirstate.drop(f)
1348 lfdirstate.drop(f)
1359 else:
1349 else:
1360 lfdirstate.remove(f)
1350 lfdirstate.remove(f)
1361 lfdirstate.write()
1351 lfdirstate.write()
1362 standins = [lfutil.standin(f) for f in forget]
1352 standins = [lfutil.standin(f) for f in forget]
1363 for f in standins:
1353 for f in standins:
1364 repo.wvfs.unlinkpath(f, ignoremissing=True)
1354 repo.wvfs.unlinkpath(f, ignoremissing=True)
1365 rejected = repo[None].forget(standins)
1355 rejected = repo[None].forget(standins)
1366
1356
1367 bad.extend(f for f in rejected if f in m.files())
1357 bad.extend(f for f in rejected if f in m.files())
1368 forgot.extend(f for f in forget if f not in rejected)
1358 forgot.extend(f for f in forget if f not in rejected)
1369 return bad, forgot
1359 return bad, forgot
1370
1360
1371
1361
1372 def _getoutgoings(repo, other, missing, addfunc):
1362 def _getoutgoings(repo, other, missing, addfunc):
1373 """get pairs of filename and largefile hash in outgoing revisions
1363 """get pairs of filename and largefile hash in outgoing revisions
1374 in 'missing'.
1364 in 'missing'.
1375
1365
1376 largefiles already existing on the 'other' repository are ignored.
1366 largefiles already existing on the 'other' repository are ignored.
1377
1367
1378 'addfunc' is invoked with each unique pair of filename and
1368 'addfunc' is invoked with each unique pair of filename and
1379 largefile hash value.
1369 largefile hash value.
1380 """
1370 """
1381 knowns = set()
1371 knowns = set()
1382 lfhashes = set()
1372 lfhashes = set()
1383
1373
1384 def dedup(fn, lfhash):
1374 def dedup(fn, lfhash):
1385 k = (fn, lfhash)
1375 k = (fn, lfhash)
1386 if k not in knowns:
1376 if k not in knowns:
1387 knowns.add(k)
1377 knowns.add(k)
1388 lfhashes.add(lfhash)
1378 lfhashes.add(lfhash)
1389
1379
1390 lfutil.getlfilestoupload(repo, missing, dedup)
1380 lfutil.getlfilestoupload(repo, missing, dedup)
1391 if lfhashes:
1381 if lfhashes:
1392 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1382 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1393 for fn, lfhash in knowns:
1383 for fn, lfhash in knowns:
1394 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1384 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1395 addfunc(fn, lfhash)
1385 addfunc(fn, lfhash)
1396
1386
1397
1387
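A compact way to read the helper above: collect the unique (filename, hash) pairs once, query the store for all hashes in a single batch, then report only the pairs the other side is missing. Stand-alone sketch (the store-query callable is a stand-in for storefactory.openstore(...).exists, not the real API):

def outgoing_largefiles(pairs, store_has):
    # pairs: iterable of (filename, largefile hash); duplicates allowed
    knowns = set(pairs)                       # dedup (filename, hash) pairs
    lfhashes = {h for _, h in knowns}         # every hash, queried in one batch
    exists = store_has(lfhashes)              # e.g. {hash: True/False}
    return sorted((fn, h) for fn, h in knowns if not exists[h])

# the remote already has hash 'aa' but not 'bb'
fake_store = lambda hashes: {h: h == 'aa' for h in hashes}
assert outgoing_largefiles([('f1', 'aa'), ('f2', 'bb'), ('f2', 'bb')],
                           fake_store) == [('f2', 'bb')]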
1398 def outgoinghook(ui, repo, other, opts, missing):
1388 def outgoinghook(ui, repo, other, opts, missing):
1399 if opts.pop(b'large', None):
1389 if opts.pop(b'large', None):
1400 lfhashes = set()
1390 lfhashes = set()
1401 if ui.debugflag:
1391 if ui.debugflag:
1402 toupload = {}
1392 toupload = {}
1403
1393
1404 def addfunc(fn, lfhash):
1394 def addfunc(fn, lfhash):
1405 if fn not in toupload:
1395 if fn not in toupload:
1406 toupload[fn] = []
1396 toupload[fn] = []
1407 toupload[fn].append(lfhash)
1397 toupload[fn].append(lfhash)
1408 lfhashes.add(lfhash)
1398 lfhashes.add(lfhash)
1409
1399
1410 def showhashes(fn):
1400 def showhashes(fn):
1411 for lfhash in sorted(toupload[fn]):
1401 for lfhash in sorted(toupload[fn]):
1412 ui.debug(b' %s\n' % lfhash)
1402 ui.debug(b' %s\n' % lfhash)
1413
1403
1414 else:
1404 else:
1415 toupload = set()
1405 toupload = set()
1416
1406
1417 def addfunc(fn, lfhash):
1407 def addfunc(fn, lfhash):
1418 toupload.add(fn)
1408 toupload.add(fn)
1419 lfhashes.add(lfhash)
1409 lfhashes.add(lfhash)
1420
1410
1421 def showhashes(fn):
1411 def showhashes(fn):
1422 pass
1412 pass
1423
1413
1424 _getoutgoings(repo, other, missing, addfunc)
1414 _getoutgoings(repo, other, missing, addfunc)
1425
1415
1426 if not toupload:
1416 if not toupload:
1427 ui.status(_(b'largefiles: no files to upload\n'))
1417 ui.status(_(b'largefiles: no files to upload\n'))
1428 else:
1418 else:
1429 ui.status(
1419 ui.status(
1430 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1420 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1431 )
1421 )
1432 for file in sorted(toupload):
1422 for file in sorted(toupload):
1433 ui.status(lfutil.splitstandin(file) + b'\n')
1423 ui.status(lfutil.splitstandin(file) + b'\n')
1434 showhashes(file)
1424 showhashes(file)
1435 ui.status(b'\n')
1425 ui.status(b'\n')
1436
1426
1437
1427
1438 @eh.wrapcommand(
1428 @eh.wrapcommand(
1439 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1429 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1440 )
1430 )
1441 def _outgoingcmd(orig, *args, **kwargs):
1431 def _outgoingcmd(orig, *args, **kwargs):
1442 # Nothing to do here other than add the extra help option; the hook above
1432 # Nothing to do here other than add the extra help option; the hook above
1443 # processes it.
1433 # processes it.
1444 return orig(*args, **kwargs)
1434 return orig(*args, **kwargs)
1445
1435
1446
1436
1447 def summaryremotehook(ui, repo, opts, changes):
1437 def summaryremotehook(ui, repo, opts, changes):
1448 largeopt = opts.get(b'large', False)
1438 largeopt = opts.get(b'large', False)
1449 if changes is None:
1439 if changes is None:
1450 if largeopt:
1440 if largeopt:
1451 return (False, True) # only outgoing check is needed
1441 return (False, True) # only outgoing check is needed
1452 else:
1442 else:
1453 return (False, False)
1443 return (False, False)
1454 elif largeopt:
1444 elif largeopt:
1455 url, branch, peer, outgoing = changes[1]
1445 url, branch, peer, outgoing = changes[1]
1456 if peer is None:
1446 if peer is None:
1457 # i18n: column positioning for "hg summary"
1447 # i18n: column positioning for "hg summary"
1458 ui.status(_(b'largefiles: (no remote repo)\n'))
1448 ui.status(_(b'largefiles: (no remote repo)\n'))
1459 return
1449 return
1460
1450
1461 toupload = set()
1451 toupload = set()
1462 lfhashes = set()
1452 lfhashes = set()
1463
1453
1464 def addfunc(fn, lfhash):
1454 def addfunc(fn, lfhash):
1465 toupload.add(fn)
1455 toupload.add(fn)
1466 lfhashes.add(lfhash)
1456 lfhashes.add(lfhash)
1467
1457
1468 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1458 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1469
1459
1470 if not toupload:
1460 if not toupload:
1471 # i18n: column positioning for "hg summary"
1461 # i18n: column positioning for "hg summary"
1472 ui.status(_(b'largefiles: (no files to upload)\n'))
1462 ui.status(_(b'largefiles: (no files to upload)\n'))
1473 else:
1463 else:
1474 # i18n: column positioning for "hg summary"
1464 # i18n: column positioning for "hg summary"
1475 ui.status(
1465 ui.status(
1476 _(b'largefiles: %d entities for %d files to upload\n')
1466 _(b'largefiles: %d entities for %d files to upload\n')
1477 % (len(lfhashes), len(toupload))
1467 % (len(lfhashes), len(toupload))
1478 )
1468 )
1479
1469
1480
1470
1481 @eh.wrapcommand(
1471 @eh.wrapcommand(
1482 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1472 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1483 )
1473 )
1484 def overridesummary(orig, ui, repo, *pats, **opts):
1474 def overridesummary(orig, ui, repo, *pats, **opts):
1485 with lfstatus(repo):
1475 with lfstatus(repo):
1486 orig(ui, repo, *pats, **opts)
1476 orig(ui, repo, *pats, **opts)
1487
1477
1488
1478
1489 @eh.wrapfunction(scmutil, b'addremove')
1479 @eh.wrapfunction(scmutil, b'addremove')
1490 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1480 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1491 if opts is None:
1481 if opts is None:
1492 opts = {}
1482 opts = {}
1493 if not lfutil.islfilesrepo(repo):
1483 if not lfutil.islfilesrepo(repo):
1494 return orig(repo, matcher, prefix, uipathfn, opts)
1484 return orig(repo, matcher, prefix, uipathfn, opts)
1495 # Get the list of missing largefiles so we can remove them
1485 # Get the list of missing largefiles so we can remove them
1496 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1486 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1497 unsure, s = lfdirstate.status(
1487 unsure, s = lfdirstate.status(
1498 matchmod.always(),
1488 matchmod.always(),
1499 subrepos=[],
1489 subrepos=[],
1500 ignored=False,
1490 ignored=False,
1501 clean=False,
1491 clean=False,
1502 unknown=False,
1492 unknown=False,
1503 )
1493 )
1504
1494
1505 # Call into the normal remove code, but let the original addremove handle
1495 # Call into the normal remove code, but let the original addremove handle
1506 # removing the standin. Monkey patching here makes sure
1496 # removing the standin. Monkey patching here makes sure
1507 # we don't remove the standin in the largefiles code, preventing a very
1497 # we don't remove the standin in the largefiles code, preventing a very
1508 # confused state later.
1498 # confused state later.
1509 if s.deleted:
1499 if s.deleted:
1510 m = copy.copy(matcher)
1500 m = copy.copy(matcher)
1511
1501
1512 # The m._files and m._map attributes are not changed to the deleted list
1502 # The m._files and m._map attributes are not changed to the deleted list
1513 # because that affects the m.exact() test, which in turn governs whether
1503 # because that affects the m.exact() test, which in turn governs whether
1514 # or not the file name is printed, and how. Simply limit the original
1504 # or not the file name is printed, and how. Simply limit the original
1515 # matches to those in the deleted status list.
1505 # matches to those in the deleted status list.
1516 matchfn = m.matchfn
1506 matchfn = m.matchfn
1517 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1507 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1518
1508
1519 removelargefiles(
1509 removelargefiles(
1520 repo.ui,
1510 repo.ui,
1521 repo,
1511 repo,
1522 True,
1512 True,
1523 m,
1513 m,
1524 uipathfn,
1514 uipathfn,
1525 opts.get(b'dry_run'),
1515 opts.get(b'dry_run'),
1526 **pycompat.strkwargs(opts)
1516 **pycompat.strkwargs(opts)
1527 )
1517 )
1528 # Call into the normal add code, and any files that *should* be added as
1518 # Call into the normal add code, and any files that *should* be added as
1529 # largefiles will be
1519 # largefiles will be
1530 added, bad = addlargefiles(
1520 added, bad = addlargefiles(
1531 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1521 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1532 )
1522 )
1533 # Now that we've handled largefiles, hand off to the original addremove
1523 # Now that we've handled largefiles, hand off to the original addremove
1534 # function to take care of the rest. Make sure it doesn't do anything with
1524 # function to take care of the rest. Make sure it doesn't do anything with
1535 # largefiles by passing a matcher that will ignore them.
1525 # largefiles by passing a matcher that will ignore them.
1536 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1526 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1537 return orig(repo, matcher, prefix, uipathfn, opts)
1527 return orig(repo, matcher, prefix, uipathfn, opts)
1538
1528
1539
1529
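The addremove override above narrows an existing matcher to the deleted files by wrapping matchfn rather than editing m._files, so m.exact() (and therefore how file names are printed) is left alone. The wrapping pattern in isolation (a sketch, not the extension's code):

def narrow_to_deleted(matchfn, deleted):
    # only accept files that are both deleted and matched by the original
    deleted = set(deleted)
    return lambda f: f in deleted and matchfn(f)

match_all = lambda f: True
m = narrow_to_deleted(match_all, ['gone.bin'])
assert m('gone.bin') is True
assert m('kept.bin') is False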
1540 # Calling purge with --all will cause the largefiles to be deleted.
1530 # Calling purge with --all will cause the largefiles to be deleted.
1541 # Override repo.status to prevent this from happening.
1531 # Override repo.status to prevent this from happening.
1542 @eh.wrapcommand(b'purge', extension=b'purge')
1532 @eh.wrapcommand(b'purge', extension=b'purge')
1543 def overridepurge(orig, ui, repo, *dirs, **opts):
1533 def overridepurge(orig, ui, repo, *dirs, **opts):
1544 # XXX Monkey patching a repoview will not work. The assigned attribute will
1534 # XXX Monkey patching a repoview will not work. The assigned attribute will
1545 # be set on the unfiltered repo, but we will only look up attributes in the
1535 # be set on the unfiltered repo, but we will only look up attributes in the
1546 # unfiltered repo if the lookup in the repoview object itself fails. As the
1536 # unfiltered repo if the lookup in the repoview object itself fails. As the
1547 # monkey patched method exists on the repoview class the lookup will not
1537 # monkey patched method exists on the repoview class the lookup will not
1548 # fail. As a result, the original version will shadow the monkey patched
1538 # fail. As a result, the original version will shadow the monkey patched
1549 # one, defeating the monkey patch.
1539 # one, defeating the monkey patch.
1550 #
1540 #
1551 # As a workaround we use an unfiltered repo here. We should do something
1541 # As a workaround we use an unfiltered repo here. We should do something
1552 # cleaner instead.
1542 # cleaner instead.
1553 repo = repo.unfiltered()
1543 repo = repo.unfiltered()
1554 oldstatus = repo.status
1544 oldstatus = repo.status
1555
1545
1556 def overridestatus(
1546 def overridestatus(
1557 node1=b'.',
1547 node1=b'.',
1558 node2=None,
1548 node2=None,
1559 match=None,
1549 match=None,
1560 ignored=False,
1550 ignored=False,
1561 clean=False,
1551 clean=False,
1562 unknown=False,
1552 unknown=False,
1563 listsubrepos=False,
1553 listsubrepos=False,
1564 ):
1554 ):
1565 r = oldstatus(
1555 r = oldstatus(
1566 node1, node2, match, ignored, clean, unknown, listsubrepos
1556 node1, node2, match, ignored, clean, unknown, listsubrepos
1567 )
1557 )
1568 lfdirstate = lfutil.openlfdirstate(ui, repo)
1558 lfdirstate = lfutil.openlfdirstate(ui, repo)
1569 unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
1559 unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
1570 ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
1560 ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
1571 return scmutil.status(
1561 return scmutil.status(
1572 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1562 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1573 )
1563 )
1574
1564
1575 repo.status = overridestatus
1565 repo.status = overridestatus
1576 orig(ui, repo, *dirs, **opts)
1566 orig(ui, repo, *dirs, **opts)
1577 repo.status = oldstatus
1567 repo.status = oldstatus
1578
1568
1579
1569
1580 @eh.wrapcommand(b'rollback')
1570 @eh.wrapcommand(b'rollback')
1581 def overriderollback(orig, ui, repo, **opts):
1571 def overriderollback(orig, ui, repo, **opts):
1582 with repo.wlock():
1572 with repo.wlock():
1583 before = repo.dirstate.parents()
1573 before = repo.dirstate.parents()
1584 orphans = {
1574 orphans = {
1585 f
1575 f
1586 for f in repo.dirstate
1576 for f in repo.dirstate
1587 if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
1577 if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
1588 }
1578 }
1589 result = orig(ui, repo, **opts)
1579 result = orig(ui, repo, **opts)
1590 after = repo.dirstate.parents()
1580 after = repo.dirstate.parents()
1591 if before == after:
1581 if before == after:
1592 return result # no need to restore standins
1582 return result # no need to restore standins
1593
1583
1594 pctx = repo[b'.']
1584 pctx = repo[b'.']
1595 for f in repo.dirstate:
1585 for f in repo.dirstate:
1596 if lfutil.isstandin(f):
1586 if lfutil.isstandin(f):
1597 orphans.discard(f)
1587 orphans.discard(f)
1598 if repo.dirstate[f] == b'r':
1588 if repo.dirstate[f] == b'r':
1599 repo.wvfs.unlinkpath(f, ignoremissing=True)
1589 repo.wvfs.unlinkpath(f, ignoremissing=True)
1600 elif f in pctx:
1590 elif f in pctx:
1601 fctx = pctx[f]
1591 fctx = pctx[f]
1602 repo.wwrite(f, fctx.data(), fctx.flags())
1592 repo.wwrite(f, fctx.data(), fctx.flags())
1603 else:
1593 else:
1604 # content of standin is not so important in 'a',
1594 # content of standin is not so important in 'a',
1605 # 'm' or 'n' (coming from the 2nd parent) cases
1595 # 'm' or 'n' (coming from the 2nd parent) cases
1606 lfutil.writestandin(repo, f, b'', False)
1596 lfutil.writestandin(repo, f, b'', False)
1607 for standin in orphans:
1597 for standin in orphans:
1608 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1598 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1609
1599
1610 lfdirstate = lfutil.openlfdirstate(ui, repo)
1600 lfdirstate = lfutil.openlfdirstate(ui, repo)
1611 orphans = set(lfdirstate)
1601 orphans = set(lfdirstate)
1612 lfiles = lfutil.listlfiles(repo)
1602 lfiles = lfutil.listlfiles(repo)
1613 for file in lfiles:
1603 for file in lfiles:
1614 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1604 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1615 orphans.discard(file)
1605 orphans.discard(file)
1616 for lfile in orphans:
1606 for lfile in orphans:
1617 lfdirstate.drop(lfile)
1607 lfdirstate.drop(lfile)
1618 lfdirstate.write()
1608 lfdirstate.write()
1619 return result
1609 return result
1620
1610
1621
1611
1622 @eh.wrapcommand(b'transplant', extension=b'transplant')
1612 @eh.wrapcommand(b'transplant', extension=b'transplant')
1623 def overridetransplant(orig, ui, repo, *revs, **opts):
1613 def overridetransplant(orig, ui, repo, *revs, **opts):
1624 resuming = opts.get('continue')
1614 resuming = opts.get('continue')
1625 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1615 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1626 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1616 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1627 try:
1617 try:
1628 result = orig(ui, repo, *revs, **opts)
1618 result = orig(ui, repo, *revs, **opts)
1629 finally:
1619 finally:
1630 repo._lfstatuswriters.pop()
1620 repo._lfstatuswriters.pop()
1631 repo._lfcommithooks.pop()
1621 repo._lfcommithooks.pop()
1632 return result
1622 return result
1633
1623
1634
1624
1635 @eh.wrapcommand(b'cat')
1625 @eh.wrapcommand(b'cat')
1636 def overridecat(orig, ui, repo, file1, *pats, **opts):
1626 def overridecat(orig, ui, repo, file1, *pats, **opts):
1637 opts = pycompat.byteskwargs(opts)
1627 opts = pycompat.byteskwargs(opts)
1638 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
1628 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
1639 err = 1
1629 err = 1
1640 notbad = set()
1630 notbad = set()
1641 m = scmutil.match(ctx, (file1,) + pats, opts)
1631 m = scmutil.match(ctx, (file1,) + pats, opts)
1642 origmatchfn = m.matchfn
1632 origmatchfn = m.matchfn
1643
1633
1644 def lfmatchfn(f):
1634 def lfmatchfn(f):
1645 if origmatchfn(f):
1635 if origmatchfn(f):
1646 return True
1636 return True
1647 lf = lfutil.splitstandin(f)
1637 lf = lfutil.splitstandin(f)
1648 if lf is None:
1638 if lf is None:
1649 return False
1639 return False
1650 notbad.add(lf)
1640 notbad.add(lf)
1651 return origmatchfn(lf)
1641 return origmatchfn(lf)
1652
1642
1653 m.matchfn = lfmatchfn
1643 m.matchfn = lfmatchfn
1654 origbadfn = m.bad
1644 origbadfn = m.bad
1655
1645
1656 def lfbadfn(f, msg):
1646 def lfbadfn(f, msg):
1657 if not f in notbad:
1647 if not f in notbad:
1658 origbadfn(f, msg)
1648 origbadfn(f, msg)
1659
1649
1660 m.bad = lfbadfn
1650 m.bad = lfbadfn
1661
1651
1662 origvisitdirfn = m.visitdir
1652 origvisitdirfn = m.visitdir
1663
1653
1664 def lfvisitdirfn(dir):
1654 def lfvisitdirfn(dir):
1665 if dir == lfutil.shortname:
1655 if dir == lfutil.shortname:
1666 return True
1656 return True
1667 ret = origvisitdirfn(dir)
1657 ret = origvisitdirfn(dir)
1668 if ret:
1658 if ret:
1669 return ret
1659 return ret
1670 lf = lfutil.splitstandin(dir)
1660 lf = lfutil.splitstandin(dir)
1671 if lf is None:
1661 if lf is None:
1672 return False
1662 return False
1673 return origvisitdirfn(lf)
1663 return origvisitdirfn(lf)
1674
1664
1675 m.visitdir = lfvisitdirfn
1665 m.visitdir = lfvisitdirfn
1676
1666
1677 for f in ctx.walk(m):
1667 for f in ctx.walk(m):
1678 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1668 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1679 lf = lfutil.splitstandin(f)
1669 lf = lfutil.splitstandin(f)
1680 if lf is None or origmatchfn(f):
1670 if lf is None or origmatchfn(f):
1681 # duplicating unreachable code from commands.cat
1671 # duplicating unreachable code from commands.cat
1682 data = ctx[f].data()
1672 data = ctx[f].data()
1683 if opts.get(b'decode'):
1673 if opts.get(b'decode'):
1684 data = repo.wwritedata(f, data)
1674 data = repo.wwritedata(f, data)
1685 fp.write(data)
1675 fp.write(data)
1686 else:
1676 else:
1687 hash = lfutil.readasstandin(ctx[f])
1677 hash = lfutil.readasstandin(ctx[f])
1688 if not lfutil.inusercache(repo.ui, hash):
1678 if not lfutil.inusercache(repo.ui, hash):
1689 store = storefactory.openstore(repo)
1679 store = storefactory.openstore(repo)
1690 success, missing = store.get([(lf, hash)])
1680 success, missing = store.get([(lf, hash)])
1691 if len(success) != 1:
1681 if len(success) != 1:
1692 raise error.Abort(
1682 raise error.Abort(
1693 _(
1683 _(
1694 b'largefile %s is not in cache and could not be '
1684 b'largefile %s is not in cache and could not be '
1695 b'downloaded'
1685 b'downloaded'
1696 )
1686 )
1697 % lf
1687 % lf
1698 )
1688 )
1699 path = lfutil.usercachepath(repo.ui, hash)
1689 path = lfutil.usercachepath(repo.ui, hash)
1700 with open(path, b"rb") as fpin:
1690 with open(path, b"rb") as fpin:
1701 for chunk in util.filechunkiter(fpin):
1691 for chunk in util.filechunkiter(fpin):
1702 fp.write(chunk)
1692 fp.write(chunk)
1703 err = 0
1693 err = 0
1704 return err
1694 return err
1705
1695
1706
1696
1707 @eh.wrapfunction(merge, b'update')
1697 @eh.wrapfunction(merge, b'update')
1708 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1698 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1709 matcher = kwargs.get('matcher', None)
1699 matcher = kwargs.get('matcher', None)
1710 # note if this is a partial update
1700 # note if this is a partial update
1711 partial = matcher and not matcher.always()
1701 partial = matcher and not matcher.always()
1712 with repo.wlock():
1702 with repo.wlock():
1713 # branch | | |
1703 # branch | | |
1714 # merge | force | partial | action
1704 # merge | force | partial | action
1715 # -------+-------+---------+--------------
1705 # -------+-------+---------+--------------
1716 # x | x | x | linear-merge
1706 # x | x | x | linear-merge
1717 # o | x | x | branch-merge
1707 # o | x | x | branch-merge
1718 # x | o | x | overwrite (as clean update)
1708 # x | o | x | overwrite (as clean update)
1719 # o | o | x | force-branch-merge (*1)
1709 # o | o | x | force-branch-merge (*1)
1720 # x | x | o | (*)
1710 # x | x | o | (*)
1721 # o | x | o | (*)
1711 # o | x | o | (*)
1722 # x | o | o | overwrite (as revert)
1712 # x | o | o | overwrite (as revert)
1723 # o | o | o | (*)
1713 # o | o | o | (*)
1724 #
1714 #
1725 # (*) don't care
1715 # (*) don't care
1726 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1716 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1727
1717
1728 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1718 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1729 unsure, s = lfdirstate.status(
1719 unsure, s = lfdirstate.status(
1730 matchmod.always(),
1720 matchmod.always(),
1731 subrepos=[],
1721 subrepos=[],
1732 ignored=False,
1722 ignored=False,
1733 clean=True,
1723 clean=True,
1734 unknown=False,
1724 unknown=False,
1735 )
1725 )
1736 oldclean = set(s.clean)
1726 oldclean = set(s.clean)
1737 pctx = repo[b'.']
1727 pctx = repo[b'.']
1738 dctx = repo[node]
1728 dctx = repo[node]
1739 for lfile in unsure + s.modified:
1729 for lfile in unsure + s.modified:
1740 lfileabs = repo.wvfs.join(lfile)
1730 lfileabs = repo.wvfs.join(lfile)
1741 if not repo.wvfs.exists(lfileabs):
1731 if not repo.wvfs.exists(lfileabs):
1742 continue
1732 continue
1743 lfhash = lfutil.hashfile(lfileabs)
1733 lfhash = lfutil.hashfile(lfileabs)
1744 standin = lfutil.standin(lfile)
1734 standin = lfutil.standin(lfile)
1745 lfutil.writestandin(
1735 lfutil.writestandin(
1746 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1736 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1747 )
1737 )
1748 if standin in pctx and lfhash == lfutil.readasstandin(
1738 if standin in pctx and lfhash == lfutil.readasstandin(
1749 pctx[standin]
1739 pctx[standin]
1750 ):
1740 ):
1751 oldclean.add(lfile)
1741 oldclean.add(lfile)
1752 for lfile in s.added:
1742 for lfile in s.added:
1753 fstandin = lfutil.standin(lfile)
1743 fstandin = lfutil.standin(lfile)
1754 if fstandin not in dctx:
1744 if fstandin not in dctx:
1755 # in this case, content of standin file is meaningless
1745 # in this case, content of standin file is meaningless
1756 # (in dctx, lfile is unknown, or normal file)
1746 # (in dctx, lfile is unknown, or normal file)
1757 continue
1747 continue
1758 lfutil.updatestandin(repo, lfile, fstandin)
1748 lfutil.updatestandin(repo, lfile, fstandin)
1759 # mark all clean largefiles as dirty, just in case the update gets
1749 # mark all clean largefiles as dirty, just in case the update gets
1760 # interrupted before largefiles and lfdirstate are synchronized
1750 # interrupted before largefiles and lfdirstate are synchronized
1761 for lfile in oldclean:
1751 for lfile in oldclean:
1762 lfdirstate.normallookup(lfile)
1752 lfdirstate.normallookup(lfile)
1763 lfdirstate.write()
1753 lfdirstate.write()
1764
1754
1765 oldstandins = lfutil.getstandinsstate(repo)
1755 oldstandins = lfutil.getstandinsstate(repo)
1766 # Make sure the merge runs on disk, not in-memory. largefiles is not a
1756 # Make sure the merge runs on disk, not in-memory. largefiles is not a
1767 # good candidate for in-memory merge (large files, custom dirstate,
1757 # good candidate for in-memory merge (large files, custom dirstate,
1768 # matcher usage).
1758 # matcher usage).
1769 kwargs['wc'] = repo[None]
1759 kwargs['wc'] = repo[None]
1770 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1760 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1771
1761
1772 newstandins = lfutil.getstandinsstate(repo)
1762 newstandins = lfutil.getstandinsstate(repo)
1773 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1763 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1774
1764
1775 # to avoid leaving all largefiles dirty (and thus rehashing them), mark
1765 # to avoid leaving all largefiles dirty (and thus rehashing them), mark
1776 # all the ones that didn't change as clean
1766 # all the ones that didn't change as clean
1777 for lfile in oldclean.difference(filelist):
1767 for lfile in oldclean.difference(filelist):
1778 lfdirstate.normal(lfile)
1768 lfdirstate.normal(lfile)
1779 lfdirstate.write()
1769 lfdirstate.write()
1780
1770
1781 if branchmerge or force or partial:
1771 if branchmerge or force or partial:
1782 filelist.extend(s.deleted + s.removed)
1772 filelist.extend(s.deleted + s.removed)
1783
1773
1784 lfcommands.updatelfiles(
1774 lfcommands.updatelfiles(
1785 repo.ui, repo, filelist=filelist, normallookup=partial
1775 repo.ui, repo, filelist=filelist, normallookup=partial
1786 )
1776 )
1787
1777
1788 return result
1778 return result
1789
1779
1790
1780
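The update wrapper above snapshots every standin's hash before the underlying update and compares afterwards; only largefiles whose standin changed get refreshed. The comparison step, reduced to plain dictionaries (a sketch; the real getstandinsstate/getlfilestoupdate helpers may track more detail):

def standins_to_update(old_state, new_state):
    # old_state / new_state: {standin name: content hash} before/after update
    return sorted(
        name for name, h in new_state.items() if old_state.get(name) != h
    )

before = {'.hglf/big.iso': 'aaa', '.hglf/img.png': 'bbb'}
after = {'.hglf/big.iso': 'aaa', '.hglf/img.png': 'ccc', '.hglf/new.bin': 'ddd'}
assert standins_to_update(before, after) == ['.hglf/img.png', '.hglf/new.bin']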
1791 @eh.wrapfunction(scmutil, b'marktouched')
1781 @eh.wrapfunction(scmutil, b'marktouched')
1792 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1782 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1793 result = orig(repo, files, *args, **kwargs)
1783 result = orig(repo, files, *args, **kwargs)
1794
1784
1795 filelist = []
1785 filelist = []
1796 for f in files:
1786 for f in files:
1797 lf = lfutil.splitstandin(f)
1787 lf = lfutil.splitstandin(f)
1798 if lf is not None:
1788 if lf is not None:
1799 filelist.append(lf)
1789 filelist.append(lf)
1800 if filelist:
1790 if filelist:
1801 lfcommands.updatelfiles(
1791 lfcommands.updatelfiles(
1802 repo.ui,
1792 repo.ui,
1803 repo,
1793 repo,
1804 filelist=filelist,
1794 filelist=filelist,
1805 printmessage=False,
1795 printmessage=False,
1806 normallookup=True,
1796 normallookup=True,
1807 )
1797 )
1808
1798
1809 return result
1799 return result
1810
1800
1811
1801
1812 @eh.wrapfunction(upgrade, b'preservedrequirements')
1802 @eh.wrapfunction(upgrade, b'preservedrequirements')
1813 @eh.wrapfunction(upgrade, b'supporteddestrequirements')
1803 @eh.wrapfunction(upgrade, b'supporteddestrequirements')
1814 def upgraderequirements(orig, repo):
1804 def upgraderequirements(orig, repo):
1815 reqs = orig(repo)
1805 reqs = orig(repo)
1816 if b'largefiles' in repo.requirements:
1806 if b'largefiles' in repo.requirements:
1817 reqs.add(b'largefiles')
1807 reqs.add(b'largefiles')
1818 return reqs
1808 return reqs
1819
1809
1820
1810
1821 _lfscheme = b'largefile://'
1811 _lfscheme = b'largefile://'
1822
1812
1823
1813
1824 @eh.wrapfunction(urlmod, b'open')
1814 @eh.wrapfunction(urlmod, b'open')
1825 def openlargefile(orig, ui, url_, data=None):
1815 def openlargefile(orig, ui, url_, data=None):
1826 if url_.startswith(_lfscheme):
1816 if url_.startswith(_lfscheme):
1827 if data:
1817 if data:
1828 msg = b"cannot use data on a 'largefile://' url"
1818 msg = b"cannot use data on a 'largefile://' url"
1829 raise error.ProgrammingError(msg)
1819 raise error.ProgrammingError(msg)
1830 lfid = url_[len(_lfscheme) :]
1820 lfid = url_[len(_lfscheme) :]
1831 return storefactory.getlfile(ui, lfid)
1821 return storefactory.getlfile(ui, lfid)
1832 else:
1822 else:
1833 return orig(ui, url_, data=data)
1823 return orig(ui, url_, data=data)
@@ -1,2195 +1,2216 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import stat
11 import stat
12 import struct
12 import struct
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 addednodeid,
16 addednodeid,
17 modifiednodeid,
17 modifiednodeid,
18 nullid,
18 nullid,
19 nullrev,
19 nullrev,
20 )
20 )
21 from .thirdparty import attr
21 from .thirdparty import attr
22 from . import (
22 from . import (
23 copies,
23 copies,
24 encoding,
24 encoding,
25 error,
25 error,
26 filemerge,
26 filemerge,
27 match as matchmod,
27 match as matchmod,
28 mergestate as mergestatemod,
28 mergestate as mergestatemod,
29 obsutil,
29 obsutil,
30 pathutil,
30 pathutil,
31 pycompat,
31 pycompat,
32 scmutil,
32 scmutil,
33 subrepoutil,
33 subrepoutil,
34 util,
34 util,
35 worker,
35 worker,
36 )
36 )
37
37
38 _pack = struct.pack
38 _pack = struct.pack
39 _unpack = struct.unpack
39 _unpack = struct.unpack
40
40
41
41
42 def _getcheckunknownconfig(repo, section, name):
42 def _getcheckunknownconfig(repo, section, name):
43 config = repo.ui.config(section, name)
43 config = repo.ui.config(section, name)
44 valid = [b'abort', b'ignore', b'warn']
44 valid = [b'abort', b'ignore', b'warn']
45 if config not in valid:
45 if config not in valid:
46 validstr = b', '.join([b"'" + v + b"'" for v in valid])
46 validstr = b', '.join([b"'" + v + b"'" for v in valid])
47 raise error.ConfigError(
47 raise error.ConfigError(
48 _(b"%s.%s not valid ('%s' is none of %s)")
48 _(b"%s.%s not valid ('%s' is none of %s)")
49 % (section, name, config, validstr)
49 % (section, name, config, validstr)
50 )
50 )
51 return config
51 return config
52
52
53
53
54 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
54 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
55 if wctx.isinmemory():
55 if wctx.isinmemory():
56 # Nothing to do in IMM because nothing in the "working copy" can be an
56 # Nothing to do in IMM because nothing in the "working copy" can be an
57 # unknown file.
57 # unknown file.
58 #
58 #
59 # Note that we should bail out here, not in ``_checkunknownfiles()``,
59 # Note that we should bail out here, not in ``_checkunknownfiles()``,
60 # because that function does other useful work.
60 # because that function does other useful work.
61 return False
61 return False
62
62
63 if f2 is None:
63 if f2 is None:
64 f2 = f
64 f2 = f
65 return (
65 return (
66 repo.wvfs.audit.check(f)
66 repo.wvfs.audit.check(f)
67 and repo.wvfs.isfileorlink(f)
67 and repo.wvfs.isfileorlink(f)
68 and repo.dirstate.normalize(f) not in repo.dirstate
68 and repo.dirstate.normalize(f) not in repo.dirstate
69 and mctx[f2].cmp(wctx[f])
69 and mctx[f2].cmp(wctx[f])
70 )
70 )
71
71
72
72
73 class _unknowndirschecker(object):
73 class _unknowndirschecker(object):
74 """
74 """
75 Look for any unknown files or directories that may have a path conflict
75 Look for any unknown files or directories that may have a path conflict
76 with a file. If any path prefix of the file exists as a file or link,
76 with a file. If any path prefix of the file exists as a file or link,
77 then it conflicts. If the file itself is a directory that contains any
77 then it conflicts. If the file itself is a directory that contains any
78 file that is not tracked, then it conflicts.
78 file that is not tracked, then it conflicts.
79
79
80 Returns the shortest path at which a conflict occurs, or None if there is
80 Returns the shortest path at which a conflict occurs, or None if there is
81 no conflict.
81 no conflict.
82 """
82 """
83
83
84 def __init__(self):
84 def __init__(self):
85 # A set of paths known to be good. This prevents repeated checking of
85 # A set of paths known to be good. This prevents repeated checking of
86 # dirs. It will be updated with any new dirs that are checked and found
86 # dirs. It will be updated with any new dirs that are checked and found
87 # to be safe.
87 # to be safe.
88 self._unknowndircache = set()
88 self._unknowndircache = set()
89
89
90 # A set of paths that are known to be absent. This prevents repeated
90 # A set of paths that are known to be absent. This prevents repeated
91 # checking of subdirectories that are known not to exist. It will be
91 # checking of subdirectories that are known not to exist. It will be
92 # updated with any new dirs that are checked and found to be absent.
92 # updated with any new dirs that are checked and found to be absent.
93 self._missingdircache = set()
93 self._missingdircache = set()
94
94
95 def __call__(self, repo, wctx, f):
95 def __call__(self, repo, wctx, f):
96 if wctx.isinmemory():
96 if wctx.isinmemory():
97 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
97 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
98 return False
98 return False
99
99
100 # Check for path prefixes that exist as unknown files.
100 # Check for path prefixes that exist as unknown files.
101 for p in reversed(list(pathutil.finddirs(f))):
101 for p in reversed(list(pathutil.finddirs(f))):
102 if p in self._missingdircache:
102 if p in self._missingdircache:
103 return
103 return
104 if p in self._unknowndircache:
104 if p in self._unknowndircache:
105 continue
105 continue
106 if repo.wvfs.audit.check(p):
106 if repo.wvfs.audit.check(p):
107 if (
107 if (
108 repo.wvfs.isfileorlink(p)
108 repo.wvfs.isfileorlink(p)
109 and repo.dirstate.normalize(p) not in repo.dirstate
109 and repo.dirstate.normalize(p) not in repo.dirstate
110 ):
110 ):
111 return p
111 return p
112 if not repo.wvfs.lexists(p):
112 if not repo.wvfs.lexists(p):
113 self._missingdircache.add(p)
113 self._missingdircache.add(p)
114 return
114 return
115 self._unknowndircache.add(p)
115 self._unknowndircache.add(p)
116
116
117 # Check if the file conflicts with a directory containing unknown files.
117 # Check if the file conflicts with a directory containing unknown files.
118 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
118 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
119 # Does the directory contain any files that are not in the dirstate?
119 # Does the directory contain any files that are not in the dirstate?
120 for p, dirs, files in repo.wvfs.walk(f):
120 for p, dirs, files in repo.wvfs.walk(f):
121 for fn in files:
121 for fn in files:
122 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
122 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
123 relf = repo.dirstate.normalize(relf, isknown=True)
123 relf = repo.dirstate.normalize(relf, isknown=True)
124 if relf not in repo.dirstate:
124 if relf not in repo.dirstate:
125 return f
125 return f
126 return None
126 return None
127
127
128
128
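The checker above walks every path prefix of an incoming file and caches which prefixes are already known to be safe or absent, so repeated probes stay cheap. Below is a minimal standalone sketch of that prefix walk; the working copy is modelled by two plain sets of file paths instead of repo.wvfs / repo.dirstate, and the second half of the check (a directory that itself contains untracked files) is omitted.

def finddirs(path):
    # yields 'a/b', then 'a' for 'a/b/c', mirroring pathutil.finddirs()
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

class UnknownDirsChecker(object):
    def __init__(self, untracked, tracked):
        self.untracked = set(untracked)   # files on disk but not in the dirstate
        self.tracked = set(tracked)       # files recorded in the dirstate
        # a prefix "exists as a directory" if any known file lives below it
        self._dirs = {d for f in self.untracked | self.tracked for d in finddirs(f)}
        self._ok = set()        # prefixes already proven safe (cache)
        self._missing = set()   # prefixes known not to exist (cache)

    def __call__(self, f):
        for p in reversed(list(finddirs(f))):
            if p in self._missing:
                return None
            if p in self._ok:
                continue
            if p in self.untracked and p not in self.tracked:
                return p        # an untracked file sits where a directory is needed
            if p not in self._dirs and p not in self.untracked:
                self._missing.add(p)
                return None
            self._ok.add(p)
        return None

checker = UnknownDirsChecker(untracked={'a/b'}, tracked={'src/main.py'})
print(checker('a/b/c'))        # 'a/b' -> creating a/b/c collides with untracked file a/b
print(checker('docs/x.txt'))   # None  -> nothing on disk blocks this path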
129 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
129 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
130 """
130 """
131 Considers any actions that care about the presence of conflicting unknown
131 Considers any actions that care about the presence of conflicting unknown
132 files. For some actions, the result is to abort; for others, it is to
132 files. For some actions, the result is to abort; for others, it is to
133 choose a different action.
133 choose a different action.
134 """
134 """
135 fileconflicts = set()
135 fileconflicts = set()
136 pathconflicts = set()
136 pathconflicts = set()
137 warnconflicts = set()
137 warnconflicts = set()
138 abortconflicts = set()
138 abortconflicts = set()
139 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
139 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
140 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
140 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
141 pathconfig = repo.ui.configbool(
141 pathconfig = repo.ui.configbool(
142 b'experimental', b'merge.checkpathconflicts'
142 b'experimental', b'merge.checkpathconflicts'
143 )
143 )
144 if not force:
144 if not force:
145
145
146 def collectconflicts(conflicts, config):
146 def collectconflicts(conflicts, config):
147 if config == b'abort':
147 if config == b'abort':
148 abortconflicts.update(conflicts)
148 abortconflicts.update(conflicts)
149 elif config == b'warn':
149 elif config == b'warn':
150 warnconflicts.update(conflicts)
150 warnconflicts.update(conflicts)
151
151
152 checkunknowndirs = _unknowndirschecker()
152 checkunknowndirs = _unknowndirschecker()
153 for f, (m, args, msg) in pycompat.iteritems(actions):
153 for f, (m, args, msg) in pycompat.iteritems(actions):
154 if m in (
154 if m in (
155 mergestatemod.ACTION_CREATED,
155 mergestatemod.ACTION_CREATED,
156 mergestatemod.ACTION_DELETED_CHANGED,
156 mergestatemod.ACTION_DELETED_CHANGED,
157 ):
157 ):
158 if _checkunknownfile(repo, wctx, mctx, f):
158 if _checkunknownfile(repo, wctx, mctx, f):
159 fileconflicts.add(f)
159 fileconflicts.add(f)
160 elif pathconfig and f not in wctx:
160 elif pathconfig and f not in wctx:
161 path = checkunknowndirs(repo, wctx, f)
161 path = checkunknowndirs(repo, wctx, f)
162 if path is not None:
162 if path is not None:
163 pathconflicts.add(path)
163 pathconflicts.add(path)
164 elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
164 elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
165 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
165 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
166 fileconflicts.add(f)
166 fileconflicts.add(f)
167
167
168 allconflicts = fileconflicts | pathconflicts
168 allconflicts = fileconflicts | pathconflicts
169 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
169 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
170 unknownconflicts = allconflicts - ignoredconflicts
170 unknownconflicts = allconflicts - ignoredconflicts
171 collectconflicts(ignoredconflicts, ignoredconfig)
171 collectconflicts(ignoredconflicts, ignoredconfig)
172 collectconflicts(unknownconflicts, unknownconfig)
172 collectconflicts(unknownconflicts, unknownconfig)
173 else:
173 else:
174 for f, (m, args, msg) in pycompat.iteritems(actions):
174 for f, (m, args, msg) in pycompat.iteritems(actions):
175 if m == mergestatemod.ACTION_CREATED_MERGE:
175 if m == mergestatemod.ACTION_CREATED_MERGE:
176 fl2, anc = args
176 fl2, anc = args
177 different = _checkunknownfile(repo, wctx, mctx, f)
177 different = _checkunknownfile(repo, wctx, mctx, f)
178 if repo.dirstate._ignore(f):
178 if repo.dirstate._ignore(f):
179 config = ignoredconfig
179 config = ignoredconfig
180 else:
180 else:
181 config = unknownconfig
181 config = unknownconfig
182
182
183 # The behavior when force is True is described by this table:
183 # The behavior when force is True is described by this table:
184 # config different mergeforce | action backup
184 # config different mergeforce | action backup
185 # * n * | get n
185 # * n * | get n
186 # * y y | merge -
186 # * y y | merge -
187 # abort y n | merge - (1)
187 # abort y n | merge - (1)
188 # warn y n | warn + get y
188 # warn y n | warn + get y
189 # ignore y n | get y
189 # ignore y n | get y
190 #
190 #
191 # (1) this is probably the wrong behavior here -- we should
191 # (1) this is probably the wrong behavior here -- we should
192 # probably abort, but some actions like rebases currently
192 # probably abort, but some actions like rebases currently
193 # don't like an abort happening in the middle of
193 # don't like an abort happening in the middle of
194 # merge.update.
194 # merge.update.
195 if not different:
195 if not different:
196 actions[f] = (
196 actions[f] = (
197 mergestatemod.ACTION_GET,
197 mergestatemod.ACTION_GET,
198 (fl2, False),
198 (fl2, False),
199 b'remote created',
199 b'remote created',
200 )
200 )
201 elif mergeforce or config == b'abort':
201 elif mergeforce or config == b'abort':
202 actions[f] = (
202 actions[f] = (
203 mergestatemod.ACTION_MERGE,
203 mergestatemod.ACTION_MERGE,
204 (f, f, None, False, anc),
204 (f, f, None, False, anc),
205 b'remote differs from untracked local',
205 b'remote differs from untracked local',
206 )
206 )
207 elif config == b'abort':
207 elif config == b'abort':
208 abortconflicts.add(f)
208 abortconflicts.add(f)
209 else:
209 else:
210 if config == b'warn':
210 if config == b'warn':
211 warnconflicts.add(f)
211 warnconflicts.add(f)
212 actions[f] = (
212 actions[f] = (
213 mergestatemod.ACTION_GET,
213 mergestatemod.ACTION_GET,
214 (fl2, True),
214 (fl2, True),
215 b'remote created',
215 b'remote created',
216 )
216 )
217
217
218 for f in sorted(abortconflicts):
218 for f in sorted(abortconflicts):
219 warn = repo.ui.warn
219 warn = repo.ui.warn
220 if f in pathconflicts:
220 if f in pathconflicts:
221 if repo.wvfs.isfileorlink(f):
221 if repo.wvfs.isfileorlink(f):
222 warn(_(b"%s: untracked file conflicts with directory\n") % f)
222 warn(_(b"%s: untracked file conflicts with directory\n") % f)
223 else:
223 else:
224 warn(_(b"%s: untracked directory conflicts with file\n") % f)
224 warn(_(b"%s: untracked directory conflicts with file\n") % f)
225 else:
225 else:
226 warn(_(b"%s: untracked file differs\n") % f)
226 warn(_(b"%s: untracked file differs\n") % f)
227 if abortconflicts:
227 if abortconflicts:
228 raise error.Abort(
228 raise error.Abort(
229 _(
229 _(
230 b"untracked files in working directory "
230 b"untracked files in working directory "
231 b"differ from files in requested revision"
231 b"differ from files in requested revision"
232 )
232 )
233 )
233 )
234
234
235 for f in sorted(warnconflicts):
235 for f in sorted(warnconflicts):
236 if repo.wvfs.isfileorlink(f):
236 if repo.wvfs.isfileorlink(f):
237 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
237 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
238 else:
238 else:
239 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
239 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
240
240
241 for f, (m, args, msg) in pycompat.iteritems(actions):
241 for f, (m, args, msg) in pycompat.iteritems(actions):
242 if m == mergestatemod.ACTION_CREATED:
242 if m == mergestatemod.ACTION_CREATED:
243 backup = (
243 backup = (
244 f in fileconflicts
244 f in fileconflicts
245 or f in pathconflicts
245 or f in pathconflicts
246 or any(p in pathconflicts for p in pathutil.finddirs(f))
246 or any(p in pathconflicts for p in pathutil.finddirs(f))
247 )
247 )
248 (flags,) = args
248 (flags,) = args
249 actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg)
249 actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg)
250
250
251
251
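The table in the force branch above maps (config, different, mergeforce) to an action and a backup flag. A small standalone function encoding the same table is shown here; it is illustrative only and uses plain strings rather than mergestatemod action constants.

def decide_forced_action(config, different, mergeforce):
    # mirrors the force=True table above: returns (action, keep_backup)
    if not different:
        return 'get', False           # row: * n *
    if mergeforce or config == 'abort':
        return 'merge', False         # rows: * y y  and  abort y n  (note (1))
    if config == 'warn':
        return 'warn+get', True       # row: warn y n
    return 'get', True                # row: ignore y n

print(decide_forced_action('warn', different=True, mergeforce=False))
# ('warn+get', True)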
252 def _forgetremoved(wctx, mctx, branchmerge):
252 def _forgetremoved(wctx, mctx, branchmerge):
253 """
253 """
254 Forget removed files
254 Forget removed files
255
255
256 If we're jumping between revisions (as opposed to merging), and if
256 If we're jumping between revisions (as opposed to merging), and if
257 neither the working directory nor the target rev has the file,
257 neither the working directory nor the target rev has the file,
258 then we need to remove it from the dirstate, to prevent the
258 then we need to remove it from the dirstate, to prevent the
259 dirstate from listing the file when it is no longer in the
259 dirstate from listing the file when it is no longer in the
260 manifest.
260 manifest.
261
261
262 If we're merging, and the other revision has removed a file
262 If we're merging, and the other revision has removed a file
263 that is not present in the working directory, we need to mark it
263 that is not present in the working directory, we need to mark it
264 as removed.
264 as removed.
265 """
265 """
266
266
267 actions = {}
267 actions = {}
268 m = mergestatemod.ACTION_FORGET
268 m = mergestatemod.ACTION_FORGET
269 if branchmerge:
269 if branchmerge:
270 m = mergestatemod.ACTION_REMOVE
270 m = mergestatemod.ACTION_REMOVE
271 for f in wctx.deleted():
271 for f in wctx.deleted():
272 if f not in mctx:
272 if f not in mctx:
273 actions[f] = m, None, b"forget deleted"
273 actions[f] = m, None, b"forget deleted"
274
274
275 if not branchmerge:
275 if not branchmerge:
276 for f in wctx.removed():
276 for f in wctx.removed():
277 if f not in mctx:
277 if f not in mctx:
278 actions[f] = (
278 actions[f] = (
279 mergestatemod.ACTION_FORGET,
279 mergestatemod.ACTION_FORGET,
280 None,
280 None,
281 b"forget removed",
281 b"forget removed",
282 )
282 )
283
283
284 return actions
284 return actions
285
285
286
286
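A compact sketch of the forget-versus-remove decision described in the docstring above, assuming deleted/removed files and the target manifest are given as plain sets; action codes are the shorthand strings 'f' (forget) and 'r' (remove), not Mercurial API.

def forget_removed(deleted, removed, target_manifest, branchmerge):
    # deleted: files missing from the working copy; removed: files hg-removed
    actions = {}
    action = 'r' if branchmerge else 'f'    # mark removed when merging, else forget
    for f in deleted:
        if f not in target_manifest:
            actions[f] = (action, None, 'forget deleted')
    if not branchmerge:
        for f in removed:
            if f not in target_manifest:
                actions[f] = ('f', None, 'forget removed')
    return actions

print(forget_removed({'gone.txt'}, set(), {'kept.txt'}, branchmerge=False))
# {'gone.txt': ('f', None, 'forget deleted')}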
287 def _checkcollision(repo, wmf, actions):
287 def _checkcollision(repo, wmf, actions):
288 """
288 """
289 Check for case-folding collisions.
289 Check for case-folding collisions.
290 """
290 """
291 # If the repo is narrowed, filter out files outside the narrowspec.
291 # If the repo is narrowed, filter out files outside the narrowspec.
292 narrowmatch = repo.narrowmatch()
292 narrowmatch = repo.narrowmatch()
293 if not narrowmatch.always():
293 if not narrowmatch.always():
294 pmmf = set(wmf.walk(narrowmatch))
294 pmmf = set(wmf.walk(narrowmatch))
295 if actions:
295 if actions:
296 narrowactions = {}
296 narrowactions = {}
297 for m, actionsfortype in pycompat.iteritems(actions):
297 for m, actionsfortype in pycompat.iteritems(actions):
298 narrowactions[m] = []
298 narrowactions[m] = []
299 for (f, args, msg) in actionsfortype:
299 for (f, args, msg) in actionsfortype:
300 if narrowmatch(f):
300 if narrowmatch(f):
301 narrowactions[m].append((f, args, msg))
301 narrowactions[m].append((f, args, msg))
302 actions = narrowactions
302 actions = narrowactions
303 else:
303 else:
304 # build provisional merged manifest up
304 # build provisional merged manifest up
305 pmmf = set(wmf)
305 pmmf = set(wmf)
306
306
307 if actions:
307 if actions:
308 # KEEP and EXEC are no-op
308 # KEEP and EXEC are no-op
309 for m in (
309 for m in (
310 mergestatemod.ACTION_ADD,
310 mergestatemod.ACTION_ADD,
311 mergestatemod.ACTION_ADD_MODIFIED,
311 mergestatemod.ACTION_ADD_MODIFIED,
312 mergestatemod.ACTION_FORGET,
312 mergestatemod.ACTION_FORGET,
313 mergestatemod.ACTION_GET,
313 mergestatemod.ACTION_GET,
314 mergestatemod.ACTION_CHANGED_DELETED,
314 mergestatemod.ACTION_CHANGED_DELETED,
315 mergestatemod.ACTION_DELETED_CHANGED,
315 mergestatemod.ACTION_DELETED_CHANGED,
316 ):
316 ):
317 for f, args, msg in actions[m]:
317 for f, args, msg in actions[m]:
318 pmmf.add(f)
318 pmmf.add(f)
319 for f, args, msg in actions[mergestatemod.ACTION_REMOVE]:
319 for f, args, msg in actions[mergestatemod.ACTION_REMOVE]:
320 pmmf.discard(f)
320 pmmf.discard(f)
321 for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
321 for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
322 f2, flags = args
322 f2, flags = args
323 pmmf.discard(f2)
323 pmmf.discard(f2)
324 pmmf.add(f)
324 pmmf.add(f)
325 for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
325 for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
326 pmmf.add(f)
326 pmmf.add(f)
327 for f, args, msg in actions[mergestatemod.ACTION_MERGE]:
327 for f, args, msg in actions[mergestatemod.ACTION_MERGE]:
328 f1, f2, fa, move, anc = args
328 f1, f2, fa, move, anc = args
329 if move:
329 if move:
330 pmmf.discard(f1)
330 pmmf.discard(f1)
331 pmmf.add(f)
331 pmmf.add(f)
332
332
333 # check case-folding collision in provisional merged manifest
333 # check case-folding collision in provisional merged manifest
334 foldmap = {}
334 foldmap = {}
335 for f in pmmf:
335 for f in pmmf:
336 fold = util.normcase(f)
336 fold = util.normcase(f)
337 if fold in foldmap:
337 if fold in foldmap:
338 raise error.Abort(
338 raise error.Abort(
339 _(b"case-folding collision between %s and %s")
339 _(b"case-folding collision between %s and %s")
340 % (f, foldmap[fold])
340 % (f, foldmap[fold])
341 )
341 )
342 foldmap[fold] = f
342 foldmap[fold] = f
343
343
344 # check case-folding of directories
344 # check case-folding of directories
345 foldprefix = unfoldprefix = lastfull = b''
345 foldprefix = unfoldprefix = lastfull = b''
346 for fold, f in sorted(foldmap.items()):
346 for fold, f in sorted(foldmap.items()):
347 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
347 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
348 # the folded prefix matches but actual casing is different
348 # the folded prefix matches but actual casing is different
349 raise error.Abort(
349 raise error.Abort(
350 _(b"case-folding collision between %s and directory of %s")
350 _(b"case-folding collision between %s and directory of %s")
351 % (lastfull, f)
351 % (lastfull, f)
352 )
352 )
353 foldprefix = fold + b'/'
353 foldprefix = fold + b'/'
354 unfoldprefix = f + b'/'
354 unfoldprefix = f + b'/'
355 lastfull = f
355 lastfull = f
356
356
357
357
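The collision check above builds a case-folded map of the provisional merged manifest and then compares folded directory prefixes. A standalone sketch of the file-level part follows; str.lower() stands in for util.normcase(), and the directory-prefix pass is omitted.

def check_casefold_collisions(paths):
    # raise if two distinct paths would collide on a case-insensitive filesystem
    foldmap = {}
    for f in paths:
        fold = f.lower()            # stand-in for util.normcase()
        if fold in foldmap and foldmap[fold] != f:
            raise ValueError('case-folding collision between %s and %s'
                             % (f, foldmap[fold]))
        foldmap[fold] = f
    return foldmap

check_casefold_collisions(['README', 'src/a.py'])       # fine
try:
    check_casefold_collisions(['README', 'readme'])
except ValueError as e:
    print(e)    # case-folding collision between readme and README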
358 def driverpreprocess(repo, ms, wctx, labels=None):
358 def driverpreprocess(repo, ms, wctx, labels=None):
359 """run the preprocess step of the merge driver, if any
359 """run the preprocess step of the merge driver, if any
360
360
361 This is currently not implemented -- it's an extension point."""
361 This is currently not implemented -- it's an extension point."""
362 return True
362 return True
363
363
364
364
365 def driverconclude(repo, ms, wctx, labels=None):
365 def driverconclude(repo, ms, wctx, labels=None):
366 """run the conclude step of the merge driver, if any
366 """run the conclude step of the merge driver, if any
367
367
368 This is currently not implemented -- it's an extension point."""
368 This is currently not implemented -- it's an extension point."""
369 return True
369 return True
370
370
371
371
372 def _filesindirs(repo, manifest, dirs):
372 def _filesindirs(repo, manifest, dirs):
373 """
373 """
374 Generator that yields pairs of all the files in the manifest that are found
374 Generator that yields pairs of all the files in the manifest that are found
375 inside the directories listed in dirs, and which directory they are found
375 inside the directories listed in dirs, and which directory they are found
376 in.
376 in.
377 """
377 """
378 for f in manifest:
378 for f in manifest:
379 for p in pathutil.finddirs(f):
379 for p in pathutil.finddirs(f):
380 if p in dirs:
380 if p in dirs:
381 yield f, p
381 yield f, p
382 break
382 break
383
383
384
384
385 def checkpathconflicts(repo, wctx, mctx, actions):
385 def checkpathconflicts(repo, wctx, mctx, actions):
386 """
386 """
387 Check if any actions introduce path conflicts in the repository, updating
387 Check if any actions introduce path conflicts in the repository, updating
388 actions to record or handle the path conflict accordingly.
388 actions to record or handle the path conflict accordingly.
389 """
389 """
390 mf = wctx.manifest()
390 mf = wctx.manifest()
391
391
392 # The set of local files that conflict with a remote directory.
392 # The set of local files that conflict with a remote directory.
393 localconflicts = set()
393 localconflicts = set()
394
394
395 # The set of directories that conflict with a remote file, and so may cause
395 # The set of directories that conflict with a remote file, and so may cause
396 # conflicts if they still contain any files after the merge.
396 # conflicts if they still contain any files after the merge.
397 remoteconflicts = set()
397 remoteconflicts = set()
398
398
399 # The set of directories that appear as both a file and a directory in the
399 # The set of directories that appear as both a file and a directory in the
400 # remote manifest. These indicate an invalid remote manifest, which
400 # remote manifest. These indicate an invalid remote manifest, which
401 # can't be updated to cleanly.
401 # can't be updated to cleanly.
402 invalidconflicts = set()
402 invalidconflicts = set()
403
403
404 # The set of directories that contain files that are being created.
404 # The set of directories that contain files that are being created.
405 createdfiledirs = set()
405 createdfiledirs = set()
406
406
407 # The set of files deleted by all the actions.
407 # The set of files deleted by all the actions.
408 deletedfiles = set()
408 deletedfiles = set()
409
409
410 for f, (m, args, msg) in actions.items():
410 for f, (m, args, msg) in actions.items():
411 if m in (
411 if m in (
412 mergestatemod.ACTION_CREATED,
412 mergestatemod.ACTION_CREATED,
413 mergestatemod.ACTION_DELETED_CHANGED,
413 mergestatemod.ACTION_DELETED_CHANGED,
414 mergestatemod.ACTION_MERGE,
414 mergestatemod.ACTION_MERGE,
415 mergestatemod.ACTION_CREATED_MERGE,
415 mergestatemod.ACTION_CREATED_MERGE,
416 ):
416 ):
417 # This action may create a new local file.
417 # This action may create a new local file.
418 createdfiledirs.update(pathutil.finddirs(f))
418 createdfiledirs.update(pathutil.finddirs(f))
419 if mf.hasdir(f):
419 if mf.hasdir(f):
420 # The file aliases a local directory. This might be ok if all
420 # The file aliases a local directory. This might be ok if all
421 # the files in the local directory are being deleted. This
421 # the files in the local directory are being deleted. This
422 # will be checked once we know what all the deleted files are.
422 # will be checked once we know what all the deleted files are.
423 remoteconflicts.add(f)
423 remoteconflicts.add(f)
424 # Track the names of all deleted files.
424 # Track the names of all deleted files.
425 if m == mergestatemod.ACTION_REMOVE:
425 if m == mergestatemod.ACTION_REMOVE:
426 deletedfiles.add(f)
426 deletedfiles.add(f)
427 if m == mergestatemod.ACTION_MERGE:
427 if m == mergestatemod.ACTION_MERGE:
428 f1, f2, fa, move, anc = args
428 f1, f2, fa, move, anc = args
429 if move:
429 if move:
430 deletedfiles.add(f1)
430 deletedfiles.add(f1)
431 if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL:
431 if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL:
432 f2, flags = args
432 f2, flags = args
433 deletedfiles.add(f2)
433 deletedfiles.add(f2)
434
434
435 # Check all directories that contain created files for path conflicts.
435 # Check all directories that contain created files for path conflicts.
436 for p in createdfiledirs:
436 for p in createdfiledirs:
437 if p in mf:
437 if p in mf:
438 if p in mctx:
438 if p in mctx:
439 # A file is in a directory which aliases both a local
439 # A file is in a directory which aliases both a local
440 # and a remote file. This is an internal inconsistency
440 # and a remote file. This is an internal inconsistency
441 # within the remote manifest.
441 # within the remote manifest.
442 invalidconflicts.add(p)
442 invalidconflicts.add(p)
443 else:
443 else:
444 # A file is in a directory which aliases a local file.
444 # A file is in a directory which aliases a local file.
445 # We will need to rename the local file.
445 # We will need to rename the local file.
446 localconflicts.add(p)
446 localconflicts.add(p)
447 if p in actions and actions[p][0] in (
447 if p in actions and actions[p][0] in (
448 mergestatemod.ACTION_CREATED,
448 mergestatemod.ACTION_CREATED,
449 mergestatemod.ACTION_DELETED_CHANGED,
449 mergestatemod.ACTION_DELETED_CHANGED,
450 mergestatemod.ACTION_MERGE,
450 mergestatemod.ACTION_MERGE,
451 mergestatemod.ACTION_CREATED_MERGE,
451 mergestatemod.ACTION_CREATED_MERGE,
452 ):
452 ):
453 # The file is in a directory which aliases a remote file.
453 # The file is in a directory which aliases a remote file.
454 # This is an internal inconsistency within the remote
454 # This is an internal inconsistency within the remote
455 # manifest.
455 # manifest.
456 invalidconflicts.add(p)
456 invalidconflicts.add(p)
457
457
458 # Rename all local conflicting files that have not been deleted.
458 # Rename all local conflicting files that have not been deleted.
459 for p in localconflicts:
459 for p in localconflicts:
460 if p not in deletedfiles:
460 if p not in deletedfiles:
461 ctxname = bytes(wctx).rstrip(b'+')
461 ctxname = bytes(wctx).rstrip(b'+')
462 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
462 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
463 porig = wctx[p].copysource() or p
463 porig = wctx[p].copysource() or p
464 actions[pnew] = (
464 actions[pnew] = (
465 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
465 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
466 (p, porig),
466 (p, porig),
467 b'local path conflict',
467 b'local path conflict',
468 )
468 )
469 actions[p] = (
469 actions[p] = (
470 mergestatemod.ACTION_PATH_CONFLICT,
470 mergestatemod.ACTION_PATH_CONFLICT,
471 (pnew, b'l'),
471 (pnew, b'l'),
472 b'path conflict',
472 b'path conflict',
473 )
473 )
474
474
475 if remoteconflicts:
475 if remoteconflicts:
476 # Check if all files in the conflicting directories have been removed.
476 # Check if all files in the conflicting directories have been removed.
477 ctxname = bytes(mctx).rstrip(b'+')
477 ctxname = bytes(mctx).rstrip(b'+')
478 for f, p in _filesindirs(repo, mf, remoteconflicts):
478 for f, p in _filesindirs(repo, mf, remoteconflicts):
479 if f not in deletedfiles:
479 if f not in deletedfiles:
480 m, args, msg = actions[p]
480 m, args, msg = actions[p]
481 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
481 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
482 if m in (
482 if m in (
483 mergestatemod.ACTION_DELETED_CHANGED,
483 mergestatemod.ACTION_DELETED_CHANGED,
484 mergestatemod.ACTION_MERGE,
484 mergestatemod.ACTION_MERGE,
485 ):
485 ):
486 # Action was merge, just update target.
486 # Action was merge, just update target.
487 actions[pnew] = (m, args, msg)
487 actions[pnew] = (m, args, msg)
488 else:
488 else:
489 # Action was create, change to renamed get action.
489 # Action was create, change to renamed get action.
490 fl = args[0]
490 fl = args[0]
491 actions[pnew] = (
491 actions[pnew] = (
492 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
492 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
493 (p, fl),
493 (p, fl),
494 b'remote path conflict',
494 b'remote path conflict',
495 )
495 )
496 actions[p] = (
496 actions[p] = (
497 mergestatemod.ACTION_PATH_CONFLICT,
497 mergestatemod.ACTION_PATH_CONFLICT,
498 (pnew, mergestatemod.ACTION_REMOVE),
498 (pnew, mergestatemod.ACTION_REMOVE),
499 b'path conflict',
499 b'path conflict',
500 )
500 )
501 remoteconflicts.remove(p)
501 remoteconflicts.remove(p)
502 break
502 break
503
503
504 if invalidconflicts:
504 if invalidconflicts:
505 for p in invalidconflicts:
505 for p in invalidconflicts:
506 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
506 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
507 raise error.Abort(_(b"destination manifest contains path conflicts"))
507 raise error.Abort(_(b"destination manifest contains path conflicts"))
508
508
509
509
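The function above classifies path conflicts into three buckets: local files shadowed by an incoming directory, local directories shadowed by an incoming file, and names that the remote side treats as both a file and a directory. A heavily reduced sketch of that classification over plain sets of file paths (illustrative only; the rename/removal bookkeeping is left out):

def classify_path_conflicts(local_files, remote_files):
    def dirs_of(f):
        parts = f.split('/')[:-1]
        return {'/'.join(parts[:i + 1]) for i in range(len(parts))}

    # a path is a local directory if any local file lives underneath it
    local_dirs = {d for f in local_files for d in dirs_of(f)}
    localconflicts, remoteconflicts, invalid = set(), set(), set()
    for f in remote_files:
        if f in local_dirs:
            remoteconflicts.add(f)          # incoming file aliases a local directory
        for p in dirs_of(f):
            if p in local_files and p in remote_files:
                invalid.add(p)              # p is both a file and a directory remotely
            elif p in local_files:
                localconflicts.add(p)       # local file blocks an incoming directory
    return localconflicts, remoteconflicts, invalid

print(classify_path_conflicts({'a', 'd/x'}, {'a/b', 'd'}))
# ({'a'}, {'d'}, set())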
510 def _filternarrowactions(narrowmatch, branchmerge, actions):
510 def _filternarrowactions(narrowmatch, branchmerge, actions):
511 """
511 """
512 Filters out actions that can be ignored because the repo is narrowed.
512 Filters out actions that can be ignored because the repo is narrowed.
513
513
514 Raise an exception if the merge cannot be completed because the repo is
514 Raise an exception if the merge cannot be completed because the repo is
515 narrowed.
515 narrowed.
516 """
516 """
517 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
517 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
518 nonconflicttypes = set(b'a am c cm f g gs r e'.split())
518 nonconflicttypes = set(b'a am c cm f g gs r e'.split())
519 # We mutate the items in the dict during iteration, so iterate
519 # We mutate the items in the dict during iteration, so iterate
520 # over a copy.
520 # over a copy.
521 for f, action in list(actions.items()):
521 for f, action in list(actions.items()):
522 if narrowmatch(f):
522 if narrowmatch(f):
523 pass
523 pass
524 elif not branchmerge:
524 elif not branchmerge:
525 del actions[f] # just updating, ignore changes outside clone
525 del actions[f] # just updating, ignore changes outside clone
526 elif action[0] in nooptypes:
526 elif action[0] in nooptypes:
527 del actions[f] # merge does not affect file
527 del actions[f] # merge does not affect file
528 elif action[0] in nonconflicttypes:
528 elif action[0] in nonconflicttypes:
529 raise error.Abort(
529 raise error.Abort(
530 _(
530 _(
531 b'merge affects file \'%s\' outside narrow, '
531 b'merge affects file \'%s\' outside narrow, '
532 b'which is not yet supported'
532 b'which is not yet supported'
533 )
533 )
534 % f,
534 % f,
535 hint=_(b'merging in the other direction may work'),
535 hint=_(b'merging in the other direction may work'),
536 )
536 )
537 else:
537 else:
538 raise error.Abort(
538 raise error.Abort(
539 _(b'conflict in file \'%s\' is outside narrow clone') % f
539 _(b'conflict in file \'%s\' is outside narrow clone') % f
540 )
540 )
541
541
542
542
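A tiny sketch of the narrow-filtering rule above: changes outside the narrowspec are dropped when simply updating, dropped when they are no-ops, and rejected when a merge would have to touch them. Illustrative only; action kinds are the shorthand strings used in the comments above, not Mercurial API.

def filter_narrow_actions(in_narrow, branchmerge, actions,
                          nooptypes=('k',),
                          nonconflict=('a', 'am', 'c', 'cm', 'f', 'g', 'gs', 'r', 'e')):
    # in_narrow: predicate telling whether a path is inside the narrow clone
    for f, (kind, args, msg) in list(actions.items()):
        if in_narrow(f):
            continue
        if not branchmerge or kind in nooptypes:
            del actions[f]                 # outside the clone and harmless: drop it
        elif kind in nonconflict:
            raise RuntimeError('merge affects %r outside narrow' % f)
        else:
            raise RuntimeError('conflict in %r is outside narrow clone' % f)
    return actions

acts = {'inside.txt': ('g', (), 'get'), 'outside.txt': ('k', (), 'keep')}
print(filter_narrow_actions(lambda f: f.startswith('inside'), True, acts))
# {'inside.txt': ('g', (), 'get')}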
543 class mergeresult(object):
543 class mergeresult(object):
544 '''An object representing the result of merging manifests.
544 '''An object representing the result of merging manifests.
545
545
546 It has information about what actions need to be performed on dirstate
546 It has information about what actions need to be performed on dirstate
547 mapping of divergent renames and other such cases. '''
547 mapping of divergent renames and other such cases. '''
548
548
549 def __init__(self):
549 def __init__(self):
550 """
550 """
551 actions: dict with filenames as keys and action-related info as values
551 actions: dict with filenames as keys and action-related info as values
552 diverge: mapping of source name -> list of dest name for
552 diverge: mapping of source name -> list of dest name for
553 divergent renames
553 divergent renames
554 renamedelete: mapping of source name -> list of destinations for files
554 renamedelete: mapping of source name -> list of destinations for files
555 deleted on one side and renamed on other.
555 deleted on one side and renamed on other.
556 commitinfo: dict containing data which should be used on commit
556 commitinfo: dict containing data which should be used on commit
557 contains a filename -> info mapping
557 contains a filename -> info mapping
558 """
558 """
559 self._actions = {}
559 self._actions = {}
560 self._diverge = {}
560 self._diverge = {}
561 self._renamedelete = {}
561 self._renamedelete = {}
562 self._commitinfo = {}
562 self._commitinfo = {}
563
563
564 def updatevalues(self, actions, diverge, renamedelete, commitinfo):
564 def updatevalues(self, diverge, renamedelete, commitinfo):
565 self._actions = actions
566 self._diverge = diverge
565 self._diverge = diverge
567 self._renamedelete = renamedelete
566 self._renamedelete = renamedelete
568 self._commitinfo = commitinfo
567 self._commitinfo = commitinfo
569
568
569 def addfile(self, filename, action, data, message):
570 """ adds a new file to the mergeresult object
571
572 filename: file which we are adding
573 action: one of mergestatemod.ACTION_*
574 data: a tuple of information like fctx and ctx related to this merge
575 message: a message about the merge
576 """
577 self._actions[filename] = (action, data, message)
578
570 @property
579 @property
571 def actions(self):
580 def actions(self):
572 return self._actions
581 return self._actions
573
582
574 @property
583 @property
575 def diverge(self):
584 def diverge(self):
576 return self._diverge
585 return self._diverge
577
586
578 @property
587 @property
579 def renamedelete(self):
588 def renamedelete(self):
580 return self._renamedelete
589 return self._renamedelete
581
590
582 @property
591 @property
583 def commitinfo(self):
592 def commitinfo(self):
584 return self._commitinfo
593 return self._commitinfo
585
594
586 @property
595 @property
587 def actionsdict(self):
596 def actionsdict(self):
588 """ returns a dictionary of actions to be perfomed with action as key
597 """ returns a dictionary of actions to be perfomed with action as key
589 and a list of files and related arguments as values """
598 and a list of files and related arguments as values """
590 # Convert to dictionary-of-lists format
599 # Convert to dictionary-of-lists format
591 actions = emptyactions()
600 actions = emptyactions()
592 for f, (m, args, msg) in pycompat.iteritems(self._actions):
601 for f, (m, args, msg) in pycompat.iteritems(self._actions):
593 if m not in actions:
602 if m not in actions:
594 actions[m] = []
603 actions[m] = []
595 actions[m].append((f, args, msg))
604 actions[m].append((f, args, msg))
596
605
597 return actions
606 return actions
598
607
599 def setactions(self, actions):
608 def setactions(self, actions):
600 self._actions = actions
609 self._actions = actions
601
610
602 def hasconflicts(self):
611 def hasconflicts(self):
603 """ tells whether this merge resulted in some actions which can
612 """ tells whether this merge resulted in some actions which can
604 result in conflicts or not """
613 result in conflicts or not """
605 for _f, (m, _unused, _unused) in pycompat.iteritems(self._actions):
614 for _f, (m, _unused, _unused) in pycompat.iteritems(self._actions):
606 if m not in (
615 if m not in (
607 mergestatemod.ACTION_GET,
616 mergestatemod.ACTION_GET,
608 mergestatemod.ACTION_KEEP,
617 mergestatemod.ACTION_KEEP,
609 mergestatemod.ACTION_EXEC,
618 mergestatemod.ACTION_EXEC,
610 mergestatemod.ACTION_REMOVE,
619 mergestatemod.ACTION_REMOVE,
611 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
620 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
612 ):
621 ):
613 return True
622 return True
614
623
615 return False
624 return False
616
625
617
626
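The diff above introduces mergeresult.addfile() and starts converting callers to record actions through it instead of building a raw actions dict and injecting it afterwards. The sketch below is a trimmed, standalone stand-in for the class (same shape as the code above, with plain-string action codes) showing addfile(), actionsdict and hasconflicts() together:

class MiniMergeResult(object):
    # trimmed stand-in for mergeresult above; action codes are plain strings
    _safe = {'g', 'k', 'e', 'r', 'pr'}   # get / keep / exec / remove / path-conflict-resolve

    def __init__(self):
        self._actions = {}

    def addfile(self, filename, action, data, message):
        self._actions[filename] = (action, data, message)

    @property
    def actionsdict(self):
        # convert filename -> action mapping into action -> list-of-files
        byaction = {}
        for f, (m, args, msg) in self._actions.items():
            byaction.setdefault(m, []).append((f, args, msg))
        return byaction

    def hasconflicts(self):
        return any(m not in self._safe for m, _a, _m in self._actions.values())

mresult = MiniMergeResult()
mresult.addfile('a.txt', 'g', ('', False), 'remote is newer')
mresult.addfile('b.txt', 'm', ('b.txt', 'b.txt', None, False, 'anc'), 'versions differ')
print(mresult.actionsdict)     # {'g': [('a.txt', ...)], 'm': [('b.txt', ...)]}
print(mresult.hasconflicts())  # True -- the 'm' (merge) action can conflict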
618 def manifestmerge(
627 def manifestmerge(
619 repo,
628 repo,
620 wctx,
629 wctx,
621 p2,
630 p2,
622 pa,
631 pa,
623 branchmerge,
632 branchmerge,
624 force,
633 force,
625 matcher,
634 matcher,
626 acceptremote,
635 acceptremote,
627 followcopies,
636 followcopies,
628 forcefulldiff=False,
637 forcefulldiff=False,
629 ):
638 ):
630 """
639 """
631 Merge wctx and p2 with ancestor pa and generate merge action list
640 Merge wctx and p2 with ancestor pa and generate merge action list
632
641
633 branchmerge and force are as passed in to update
642 branchmerge and force are as passed in to update
634 matcher = matcher to filter file lists
643 matcher = matcher to filter file lists
635 acceptremote = accept the incoming changes without prompting
644 acceptremote = accept the incoming changes without prompting
636
645
637 Returns an object of mergeresult class
646 Returns an object of mergeresult class
638 """
647 """
648 mresult = mergeresult()
639 if matcher is not None and matcher.always():
649 if matcher is not None and matcher.always():
640 matcher = None
650 matcher = None
641
651
642 # manifests fetched in order are going to be faster, so prime the caches
652 # manifests fetched in order are going to be faster, so prime the caches
643 [
653 [
644 x.manifest()
654 x.manifest()
645 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
655 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
646 ]
656 ]
647
657
648 branch_copies1 = copies.branch_copies()
658 branch_copies1 = copies.branch_copies()
649 branch_copies2 = copies.branch_copies()
659 branch_copies2 = copies.branch_copies()
650 diverge = {}
660 diverge = {}
651 # information from merge which is needed at commit time
661 # information from merge which is needed at commit time
652 # for example choosing filelog of which parent to commit
662 # for example choosing filelog of which parent to commit
653 # TODO: use specific constants in future for this mapping
663 # TODO: use specific constants in future for this mapping
654 commitinfo = {}
664 commitinfo = {}
655 if followcopies:
665 if followcopies:
656 branch_copies1, branch_copies2, diverge = copies.mergecopies(
666 branch_copies1, branch_copies2, diverge = copies.mergecopies(
657 repo, wctx, p2, pa
667 repo, wctx, p2, pa
658 )
668 )
659
669
660 boolbm = pycompat.bytestr(bool(branchmerge))
670 boolbm = pycompat.bytestr(bool(branchmerge))
661 boolf = pycompat.bytestr(bool(force))
671 boolf = pycompat.bytestr(bool(force))
662 boolm = pycompat.bytestr(bool(matcher))
672 boolm = pycompat.bytestr(bool(matcher))
663 repo.ui.note(_(b"resolving manifests\n"))
673 repo.ui.note(_(b"resolving manifests\n"))
664 repo.ui.debug(
674 repo.ui.debug(
665 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
675 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
666 )
676 )
667 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
677 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
668
678
669 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
679 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
670 copied1 = set(branch_copies1.copy.values())
680 copied1 = set(branch_copies1.copy.values())
671 copied1.update(branch_copies1.movewithdir.values())
681 copied1.update(branch_copies1.movewithdir.values())
672 copied2 = set(branch_copies2.copy.values())
682 copied2 = set(branch_copies2.copy.values())
673 copied2.update(branch_copies2.movewithdir.values())
683 copied2.update(branch_copies2.movewithdir.values())
674
684
675 if b'.hgsubstate' in m1 and wctx.rev() is None:
685 if b'.hgsubstate' in m1 and wctx.rev() is None:
676 # Check whether sub state is modified, and overwrite the manifest
686 # Check whether sub state is modified, and overwrite the manifest
677 # to flag the change. If wctx is a committed revision, we shouldn't
687 # to flag the change. If wctx is a committed revision, we shouldn't
678 # care for the dirty state of the working directory.
688 # care for the dirty state of the working directory.
679 if any(wctx.sub(s).dirty() for s in wctx.substate):
689 if any(wctx.sub(s).dirty() for s in wctx.substate):
680 m1[b'.hgsubstate'] = modifiednodeid
690 m1[b'.hgsubstate'] = modifiednodeid
681
691
682 # Don't use m2-vs-ma optimization if:
692 # Don't use m2-vs-ma optimization if:
683 # - ma is the same as m1 or m2, which we're just going to diff again later
693 # - ma is the same as m1 or m2, which we're just going to diff again later
684 # - The caller specifically asks for a full diff, which is useful during bid
694 # - The caller specifically asks for a full diff, which is useful during bid
685 # merge.
695 # merge.
686 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
696 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
687 # Identify which files are relevant to the merge, so we can limit the
697 # Identify which files are relevant to the merge, so we can limit the
688 # total m1-vs-m2 diff to just those files. This has significant
698 # total m1-vs-m2 diff to just those files. This has significant
689 # performance benefits in large repositories.
699 # performance benefits in large repositories.
690 relevantfiles = set(ma.diff(m2).keys())
700 relevantfiles = set(ma.diff(m2).keys())
691
701
692 # For copied and moved files, we need to add the source file too.
702 # For copied and moved files, we need to add the source file too.
693 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
703 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
694 if copyvalue in relevantfiles:
704 if copyvalue in relevantfiles:
695 relevantfiles.add(copykey)
705 relevantfiles.add(copykey)
696 for movedirkey in branch_copies1.movewithdir:
706 for movedirkey in branch_copies1.movewithdir:
697 relevantfiles.add(movedirkey)
707 relevantfiles.add(movedirkey)
698 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
708 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
699 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
709 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
700
710
701 diff = m1.diff(m2, match=matcher)
711 diff = m1.diff(m2, match=matcher)
702
712
703 actions = {}
704 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
713 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
705 if n1 and n2: # file exists on both local and remote side
714 if n1 and n2: # file exists on both local and remote side
706 if f not in ma:
715 if f not in ma:
707 # TODO: what if they're renamed from different sources?
716 # TODO: what if they're renamed from different sources?
708 fa = branch_copies1.copy.get(
717 fa = branch_copies1.copy.get(
709 f, None
718 f, None
710 ) or branch_copies2.copy.get(f, None)
719 ) or branch_copies2.copy.get(f, None)
711 if fa is not None:
720 if fa is not None:
712 actions[f] = (
721 mresult.addfile(
722 f,
713 mergestatemod.ACTION_MERGE,
723 mergestatemod.ACTION_MERGE,
714 (f, f, fa, False, pa.node()),
724 (f, f, fa, False, pa.node()),
715 b'both renamed from %s' % fa,
725 b'both renamed from %s' % fa,
716 )
726 )
717 else:
727 else:
718 actions[f] = (
728 mresult.addfile(
729 f,
719 mergestatemod.ACTION_MERGE,
730 mergestatemod.ACTION_MERGE,
720 (f, f, None, False, pa.node()),
731 (f, f, None, False, pa.node()),
721 b'both created',
732 b'both created',
722 )
733 )
723 else:
734 else:
724 a = ma[f]
735 a = ma[f]
725 fla = ma.flags(f)
736 fla = ma.flags(f)
726 nol = b'l' not in fl1 + fl2 + fla
737 nol = b'l' not in fl1 + fl2 + fla
727 if n2 == a and fl2 == fla:
738 if n2 == a and fl2 == fla:
728 actions[f] = (
739 mresult.addfile(
729 mergestatemod.ACTION_KEEP,
740 f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
730 (),
731 b'remote unchanged',
732 )
741 )
733 elif n1 == a and fl1 == fla: # local unchanged - use remote
742 elif n1 == a and fl1 == fla: # local unchanged - use remote
734 if n1 == n2: # optimization: keep local content
743 if n1 == n2: # optimization: keep local content
735 actions[f] = (
744 mresult.addfile(
745 f,
736 mergestatemod.ACTION_EXEC,
746 mergestatemod.ACTION_EXEC,
737 (fl2,),
747 (fl2,),
738 b'update permissions',
748 b'update permissions',
739 )
749 )
740 else:
750 else:
741 actions[f] = (
751 mresult.addfile(
752 f,
742 mergestatemod.ACTION_GET,
753 mergestatemod.ACTION_GET,
743 (fl2, False),
754 (fl2, False),
744 b'remote is newer',
755 b'remote is newer',
745 )
756 )
746 if branchmerge:
757 if branchmerge:
747 commitinfo[f] = b'other'
758 commitinfo[f] = b'other'
748 elif nol and n2 == a: # remote only changed 'x'
759 elif nol and n2 == a: # remote only changed 'x'
749 actions[f] = (
760 mresult.addfile(
761 f,
750 mergestatemod.ACTION_EXEC,
762 mergestatemod.ACTION_EXEC,
751 (fl2,),
763 (fl2,),
752 b'update permissions',
764 b'update permissions',
753 )
765 )
754 elif nol and n1 == a: # local only changed 'x'
766 elif nol and n1 == a: # local only changed 'x'
755 actions[f] = (
767 mresult.addfile(
768 f,
756 mergestatemod.ACTION_GET,
769 mergestatemod.ACTION_GET,
757 (fl1, False),
770 (fl1, False),
758 b'remote is newer',
771 b'remote is newer',
759 )
772 )
760 if branchmerge:
773 if branchmerge:
761 commitinfo[f] = b'other'
774 commitinfo[f] = b'other'
762 else: # both changed something
775 else: # both changed something
763 actions[f] = (
776 mresult.addfile(
777 f,
764 mergestatemod.ACTION_MERGE,
778 mergestatemod.ACTION_MERGE,
765 (f, f, f, False, pa.node()),
779 (f, f, f, False, pa.node()),
766 b'versions differ',
780 b'versions differ',
767 )
781 )
768 elif n1: # file exists only on local side
782 elif n1: # file exists only on local side
769 if f in copied2:
783 if f in copied2:
770 pass # we'll deal with it on m2 side
784 pass # we'll deal with it on m2 side
771 elif (
785 elif (
772 f in branch_copies1.movewithdir
786 f in branch_copies1.movewithdir
773 ): # directory rename, move local
787 ): # directory rename, move local
774 f2 = branch_copies1.movewithdir[f]
788 f2 = branch_copies1.movewithdir[f]
775 if f2 in m2:
789 if f2 in m2:
776 actions[f2] = (
790 mresult.addfile(
791 f2,
777 mergestatemod.ACTION_MERGE,
792 mergestatemod.ACTION_MERGE,
778 (f, f2, None, True, pa.node()),
793 (f, f2, None, True, pa.node()),
779 b'remote directory rename, both created',
794 b'remote directory rename, both created',
780 )
795 )
781 else:
796 else:
782 actions[f2] = (
797 mresult.addfile(
798 f2,
783 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
799 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
784 (f, fl1),
800 (f, fl1),
785 b'remote directory rename - move from %s' % f,
801 b'remote directory rename - move from %s' % f,
786 )
802 )
787 elif f in branch_copies1.copy:
803 elif f in branch_copies1.copy:
788 f2 = branch_copies1.copy[f]
804 f2 = branch_copies1.copy[f]
789 actions[f] = (
805 mresult.addfile(
806 f,
790 mergestatemod.ACTION_MERGE,
807 mergestatemod.ACTION_MERGE,
791 (f, f2, f2, False, pa.node()),
808 (f, f2, f2, False, pa.node()),
792 b'local copied/moved from %s' % f2,
809 b'local copied/moved from %s' % f2,
793 )
810 )
794 elif f in ma: # clean, a different, no remote
811 elif f in ma: # clean, a different, no remote
795 if n1 != ma[f]:
812 if n1 != ma[f]:
796 if acceptremote:
813 if acceptremote:
797 actions[f] = (
814 mresult.addfile(
815 f,
798 mergestatemod.ACTION_REMOVE,
816 mergestatemod.ACTION_REMOVE,
799 None,
817 None,
800 b'remote delete',
818 b'remote delete',
801 )
819 )
802 else:
820 else:
803 actions[f] = (
821 mresult.addfile(
822 f,
804 mergestatemod.ACTION_CHANGED_DELETED,
823 mergestatemod.ACTION_CHANGED_DELETED,
805 (f, None, f, False, pa.node()),
824 (f, None, f, False, pa.node()),
806 b'prompt changed/deleted',
825 b'prompt changed/deleted',
807 )
826 )
808 elif n1 == addednodeid:
827 elif n1 == addednodeid:
809 # This file was locally added. We should forget it instead of
828 # This file was locally added. We should forget it instead of
810 # deleting it.
829 # deleting it.
811 actions[f] = (
830 mresult.addfile(
812 mergestatemod.ACTION_FORGET,
831 f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
813 None,
814 b'remote deleted',
815 )
832 )
816 else:
833 else:
817 actions[f] = (
834 mresult.addfile(
818 mergestatemod.ACTION_REMOVE,
835 f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
819 None,
820 b'other deleted',
821 )
836 )
822 elif n2: # file exists only on remote side
837 elif n2: # file exists only on remote side
823 if f in copied1:
838 if f in copied1:
824 pass # we'll deal with it on m1 side
839 pass # we'll deal with it on m1 side
825 elif f in branch_copies2.movewithdir:
840 elif f in branch_copies2.movewithdir:
826 f2 = branch_copies2.movewithdir[f]
841 f2 = branch_copies2.movewithdir[f]
827 if f2 in m1:
842 if f2 in m1:
828 actions[f2] = (
843 mresult.addfile(
844 f2,
829 mergestatemod.ACTION_MERGE,
845 mergestatemod.ACTION_MERGE,
830 (f2, f, None, False, pa.node()),
846 (f2, f, None, False, pa.node()),
831 b'local directory rename, both created',
847 b'local directory rename, both created',
832 )
848 )
833 else:
849 else:
834 actions[f2] = (
850 mresult.addfile(
851 f2,
835 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
852 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
836 (f, fl2),
853 (f, fl2),
837 b'local directory rename - get from %s' % f,
854 b'local directory rename - get from %s' % f,
838 )
855 )
839 elif f in branch_copies2.copy:
856 elif f in branch_copies2.copy:
840 f2 = branch_copies2.copy[f]
857 f2 = branch_copies2.copy[f]
841 if f2 in m2:
858 if f2 in m2:
842 actions[f] = (
859 mresult.addfile(
860 f,
843 mergestatemod.ACTION_MERGE,
861 mergestatemod.ACTION_MERGE,
844 (f2, f, f2, False, pa.node()),
862 (f2, f, f2, False, pa.node()),
845 b'remote copied from %s' % f2,
863 b'remote copied from %s' % f2,
846 )
864 )
847 else:
865 else:
848 actions[f] = (
866 mresult.addfile(
867 f,
849 mergestatemod.ACTION_MERGE,
868 mergestatemod.ACTION_MERGE,
850 (f2, f, f2, True, pa.node()),
869 (f2, f, f2, True, pa.node()),
851 b'remote moved from %s' % f2,
870 b'remote moved from %s' % f2,
852 )
871 )
853 elif f not in ma:
872 elif f not in ma:
854 # local unknown, remote created: the logic is described by the
873 # local unknown, remote created: the logic is described by the
855 # following table:
874 # following table:
856 #
875 #
857 # force branchmerge different | action
876 # force branchmerge different | action
858 # n * * | create
877 # n * * | create
859 # y n * | create
878 # y n * | create
860 # y y n | create
879 # y y n | create
861 # y y y | merge
880 # y y y | merge
862 #
881 #
863 # Checking whether the files are different is expensive, so we
882 # Checking whether the files are different is expensive, so we
864 # don't do that when we can avoid it.
883 # don't do that when we can avoid it.
865 if not force:
884 if not force:
866 actions[f] = (
885 mresult.addfile(
886 f,
867 mergestatemod.ACTION_CREATED,
887 mergestatemod.ACTION_CREATED,
868 (fl2,),
888 (fl2,),
869 b'remote created',
889 b'remote created',
870 )
890 )
871 elif not branchmerge:
891 elif not branchmerge:
872 actions[f] = (
892 mresult.addfile(
893 f,
873 mergestatemod.ACTION_CREATED,
894 mergestatemod.ACTION_CREATED,
874 (fl2,),
895 (fl2,),
875 b'remote created',
896 b'remote created',
876 )
897 )
877 else:
898 else:
878 actions[f] = (
899 mresult.addfile(
900 f,
879 mergestatemod.ACTION_CREATED_MERGE,
901 mergestatemod.ACTION_CREATED_MERGE,
880 (fl2, pa.node()),
902 (fl2, pa.node()),
881 b'remote created, get or merge',
903 b'remote created, get or merge',
882 )
904 )
883 elif n2 != ma[f]:
905 elif n2 != ma[f]:
884 df = None
906 df = None
885 for d in branch_copies1.dirmove:
907 for d in branch_copies1.dirmove:
886 if f.startswith(d):
908 if f.startswith(d):
887 # new file added in a directory that was moved
909 # new file added in a directory that was moved
888 df = branch_copies1.dirmove[d] + f[len(d) :]
910 df = branch_copies1.dirmove[d] + f[len(d) :]
889 break
911 break
890 if df is not None and df in m1:
912 if df is not None and df in m1:
891 actions[df] = (
913 mresult.addfile(
914 df,
892 mergestatemod.ACTION_MERGE,
915 mergestatemod.ACTION_MERGE,
893 (df, f, f, False, pa.node()),
916 (df, f, f, False, pa.node()),
894 b'local directory rename - respect move '
917 b'local directory rename - respect move '
895 b'from %s' % f,
918 b'from %s' % f,
896 )
919 )
897 elif acceptremote:
920 elif acceptremote:
898 actions[f] = (
921 mresult.addfile(
922 f,
899 mergestatemod.ACTION_CREATED,
923 mergestatemod.ACTION_CREATED,
900 (fl2,),
924 (fl2,),
901 b'remote recreating',
925 b'remote recreating',
902 )
926 )
903 else:
927 else:
904 actions[f] = (
928 mresult.addfile(
929 f,
905 mergestatemod.ACTION_DELETED_CHANGED,
930 mergestatemod.ACTION_DELETED_CHANGED,
906 (None, f, f, False, pa.node()),
931 (None, f, f, False, pa.node()),
907 b'prompt deleted/changed',
932 b'prompt deleted/changed',
908 )
933 )
909
934
910 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
935 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
911 # If we are merging, look for path conflicts.
936 # If we are merging, look for path conflicts.
912 checkpathconflicts(repo, wctx, p2, actions)
937 checkpathconflicts(repo, wctx, p2, mresult.actions)
913
938
914 narrowmatch = repo.narrowmatch()
939 narrowmatch = repo.narrowmatch()
915 if not narrowmatch.always():
940 if not narrowmatch.always():
916 # Updates "actions" in place
941 # Updates "actions" in place
917 _filternarrowactions(narrowmatch, branchmerge, actions)
942 _filternarrowactions(narrowmatch, branchmerge, mresult.actions)
918
943
919 renamedelete = branch_copies1.renamedelete
944 renamedelete = branch_copies1.renamedelete
920 renamedelete.update(branch_copies2.renamedelete)
945 renamedelete.update(branch_copies2.renamedelete)
921
946
922 mresult = mergeresult()
947 mresult.updatevalues(diverge, renamedelete, commitinfo)
923 mresult.updatevalues(actions, diverge, renamedelete, commitinfo)
924 return mresult
948 return mresult
925
949
926
950
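manifestmerge() is driven by m1.diff(m2): for each differing file it looks at which sides have a node and dispatches through mresult.addfile(). The sketch below is a heavily reduced, standalone version of that dispatch with manifests as plain {path: content} dicts; copy tracing, flags and the force/branchmerge tables are deliberately left out.

def mini_manifestmerge(m1, m2, ma):
    # m1: local, m2: remote, ma: ancestor manifests as plain {path: content} dicts;
    # returns {path: (action, data, message)} in the spirit of mresult.addfile()
    actions = {}
    def addfile(f, action, data, message):
        actions[f] = (action, data, message)
    for f in sorted(set(m1) | set(m2)):
        n1, n2, a = m1.get(f), m2.get(f), ma.get(f)
        if n1 == n2:
            continue                                  # same on both sides
        if n1 is not None and n2 is not None:         # exists on both sides
            if n2 == a:
                addfile(f, 'k', (), 'remote unchanged')
            elif n1 == a:
                addfile(f, 'g', (n2,), 'remote is newer')
            else:
                addfile(f, 'm', (f, f, f), 'versions differ')
        elif n1 is not None:                          # file only on the local side
            addfile(f, 'r' if n1 == a else 'cd', None, 'remote deleted it')
        else:                                         # file only on the remote side
            addfile(f, 'c' if a is None else 'dc', (n2,), 'remote created it')
    return actions

print(mini_manifestmerge({'x': 1, 'y': 2}, {'x': 1, 'y': 3, 'z': 9}, {'x': 1, 'y': 2}))
# {'y': ('g', (3,), 'remote is newer'), 'z': ('c', (9,), 'remote created it')}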
927 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
951 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
928 """Resolves false conflicts where the nodeid changed but the content
952 """Resolves false conflicts where the nodeid changed but the content
929 remained the same."""
953 remained the same."""
930 # We force a copy of actions.items() because we're going to mutate
954 # We force a copy of actions.items() because we're going to mutate
931 # actions as we resolve trivial conflicts.
955 # actions as we resolve trivial conflicts.
932 for f, (m, args, msg) in list(actions.items()):
956 for f, (m, args, msg) in list(actions.items()):
933 if (
957 if (
934 m == mergestatemod.ACTION_CHANGED_DELETED
958 m == mergestatemod.ACTION_CHANGED_DELETED
935 and f in ancestor
959 and f in ancestor
936 and not wctx[f].cmp(ancestor[f])
960 and not wctx[f].cmp(ancestor[f])
937 ):
961 ):
938 # local did change but ended up with same content
962 # local did change but ended up with same content
939 actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same'
963 actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same'
940 elif (
964 elif (
941 m == mergestatemod.ACTION_DELETED_CHANGED
965 m == mergestatemod.ACTION_DELETED_CHANGED
942 and f in ancestor
966 and f in ancestor
943 and not mctx[f].cmp(ancestor[f])
967 and not mctx[f].cmp(ancestor[f])
944 ):
968 ):
945 # remote did change but ended up with same content
969 # remote did change but ended up with same content
946 del actions[f] # don't get = keep local deleted
970 del actions[f] # don't get = keep local deleted
947
971
948
972
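The trivial-conflict pass above downgrades changed/deleted prompts when the "changed" side actually ended up with the ancestor's content. A standalone sketch with file contents as plain strings (shorthand action codes 'cd'/'dc'/'r', not Mercurial API):

def resolve_trivial(actions, local, remote, ancestor):
    # actions: {path: (action, args, msg)}; local/remote/ancestor: {path: content}
    for f, (m, args, msg) in list(actions.items()):
        if m == 'cd' and f in ancestor and local.get(f) == ancestor[f]:
            actions[f] = ('r', None, 'prompt same')    # local "change" was a no-op
        elif m == 'dc' and f in ancestor and remote.get(f) == ancestor[f]:
            del actions[f]                              # keep the local deletion
    return actions

acts = {'a': ('cd', None, 'prompt changed/deleted'),
        'b': ('dc', None, 'prompt deleted/changed')}
print(resolve_trivial(acts, {'a': 'old'}, {'b': 'old'}, {'a': 'old', 'b': 'old'}))
# {'a': ('r', None, 'prompt same')}   -- 'b' dropped: remote change was a no-op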
949 def calculateupdates(
973 def calculateupdates(
950 repo,
974 repo,
951 wctx,
975 wctx,
952 mctx,
976 mctx,
953 ancestors,
977 ancestors,
954 branchmerge,
978 branchmerge,
955 force,
979 force,
956 acceptremote,
980 acceptremote,
957 followcopies,
981 followcopies,
958 matcher=None,
982 matcher=None,
959 mergeforce=False,
983 mergeforce=False,
960 ):
984 ):
961 """
985 """
962 Calculate the actions needed to merge mctx into wctx using ancestors
986 Calculate the actions needed to merge mctx into wctx using ancestors
963
987
964 Uses manifestmerge() to merge manifest and get list of actions required to
988 Uses manifestmerge() to merge manifest and get list of actions required to
965 perform for merging two manifests. If there are multiple ancestors, uses bid
989 perform for merging two manifests. If there are multiple ancestors, uses bid
966 merge if enabled.
990 merge if enabled.
967
991
968 Also filters out actions which are unrequired if repository is sparse.
992 Also filters out actions which are unrequired if repository is sparse.
969
993
970 Returns mergeresult object same as manifestmerge().
994 Returns mergeresult object same as manifestmerge().
971 """
995 """
972 # Avoid cycle.
996 # Avoid cycle.
973 from . import sparse
997 from . import sparse
974
998
975 mresult = None
999 mresult = None
976 if len(ancestors) == 1: # default
1000 if len(ancestors) == 1: # default
977 mresult = manifestmerge(
1001 mresult = manifestmerge(
978 repo,
1002 repo,
979 wctx,
1003 wctx,
980 mctx,
1004 mctx,
981 ancestors[0],
1005 ancestors[0],
982 branchmerge,
1006 branchmerge,
983 force,
1007 force,
984 matcher,
1008 matcher,
985 acceptremote,
1009 acceptremote,
986 followcopies,
1010 followcopies,
987 )
1011 )
988 _checkunknownfiles(repo, wctx, mctx, force, mresult.actions, mergeforce)
1012 _checkunknownfiles(repo, wctx, mctx, force, mresult.actions, mergeforce)
989
1013
990 else: # only when merge.preferancestor=* - the default
1014 else: # only when merge.preferancestor=* - the default
991 repo.ui.note(
1015 repo.ui.note(
992 _(b"note: merging %s and %s using bids from ancestors %s\n")
1016 _(b"note: merging %s and %s using bids from ancestors %s\n")
993 % (
1017 % (
994 wctx,
1018 wctx,
995 mctx,
1019 mctx,
996 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1020 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
997 )
1021 )
998 )
1022 )
999
1023
1000 # mapping filename to bids (action method to list of actions)
1024 # mapping filename to bids (action method to list of actions)
1001 # {FILENAME1 : BID1, FILENAME2 : BID2}
1025 # {FILENAME1 : BID1, FILENAME2 : BID2}
1002 # BID is another dictionary which contains
1026 # BID is another dictionary which contains
1003 # a mapping of the following form:
1027 # a mapping of the following form:
1004 # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
1028 # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
1005 fbids = {}
1029 fbids = {}
1006 diverge, renamedelete = None, None
1030 diverge, renamedelete = None, None
1007 for ancestor in ancestors:
1031 for ancestor in ancestors:
1008 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1032 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1009 mresult1 = manifestmerge(
1033 mresult1 = manifestmerge(
1010 repo,
1034 repo,
1011 wctx,
1035 wctx,
1012 mctx,
1036 mctx,
1013 ancestor,
1037 ancestor,
1014 branchmerge,
1038 branchmerge,
1015 force,
1039 force,
1016 matcher,
1040 matcher,
1017 acceptremote,
1041 acceptremote,
1018 followcopies,
1042 followcopies,
1019 forcefulldiff=True,
1043 forcefulldiff=True,
1020 )
1044 )
1021 _checkunknownfiles(
1045 _checkunknownfiles(
1022 repo, wctx, mctx, force, mresult1.actions, mergeforce
1046 repo, wctx, mctx, force, mresult1.actions, mergeforce
1023 )
1047 )
1024
1048
1025 # Track the shortest set of warnings on the theory that bid
1049 # Track the shortest set of warnings on the theory that bid
1026 # merge will correctly incorporate more information
1050 # merge will correctly incorporate more information
1027 if diverge is None or len(mresult1.diverge) < len(diverge):
1051 if diverge is None or len(mresult1.diverge) < len(diverge):
1028 diverge = mresult1.diverge
1052 diverge = mresult1.diverge
1029 if renamedelete is None or len(renamedelete) < len(
1053 if renamedelete is None or len(renamedelete) < len(
1030 mresult1.renamedelete
1054 mresult1.renamedelete
1031 ):
1055 ):
1032 renamedelete = mresult1.renamedelete
1056 renamedelete = mresult1.renamedelete
1033
1057
1034 for f, a in sorted(pycompat.iteritems(mresult1.actions)):
1058 for f, a in sorted(pycompat.iteritems(mresult1.actions)):
1035 m, args, msg = a
1059 m, args, msg = a
1036 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1060 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1037 if f in fbids:
1061 if f in fbids:
1038 d = fbids[f]
1062 d = fbids[f]
1039 if m in d:
1063 if m in d:
1040 d[m].append(a)
1064 d[m].append(a)
1041 else:
1065 else:
1042 d[m] = [a]
1066 d[m] = [a]
1043 else:
1067 else:
1044 fbids[f] = {m: [a]}
1068 fbids[f] = {m: [a]}
1045
1069
1046 # Call for bids
1070 # Call for bids
1047 # Pick the best bid for each file
1071 # Pick the best bid for each file
1048 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1072 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1049 actions = {}
1073 mresult = mergeresult()
1050 for f, bids in sorted(fbids.items()):
1074 for f, bids in sorted(fbids.items()):
1051 # bids is a mapping from action method to list of actions
1075 # bids is a mapping from action method to list of actions
1052 # Consensus?
1076 # Consensus?
1053 if len(bids) == 1: # all bids are the same kind of method
1077 if len(bids) == 1: # all bids are the same kind of method
1054 m, l = list(bids.items())[0]
1078 m, l = list(bids.items())[0]
1055 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1079 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1056 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1080 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1057 actions[f] = l[0]
1081 mresult.addfile(f, *l[0])
1058 continue
1082 continue
1059 # If keep is an option, just do it.
1083 # If keep is an option, just do it.
1060 if mergestatemod.ACTION_KEEP in bids:
1084 if mergestatemod.ACTION_KEEP in bids:
1061 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1085 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1062 actions[f] = bids[mergestatemod.ACTION_KEEP][0]
1086 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
1063 continue
1087 continue
1064 # If there are gets and they all agree [how could they not?], do it.
1088 # If there are gets and they all agree [how could they not?], do it.
1065 if mergestatemod.ACTION_GET in bids:
1089 if mergestatemod.ACTION_GET in bids:
1066 ga0 = bids[mergestatemod.ACTION_GET][0]
1090 ga0 = bids[mergestatemod.ACTION_GET][0]
1067 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1091 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1068 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1092 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1069 actions[f] = ga0
1093 mresult.addfile(f, *ga0)
1070 continue
1094 continue
1071 # TODO: Consider other simple actions such as mode changes
1095 # TODO: Consider other simple actions such as mode changes
1072 # Handle inefficient democrazy.
1096 # Handle inefficient democrazy.
1073 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1097 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1074 for m, l in sorted(bids.items()):
1098 for m, l in sorted(bids.items()):
1075 for _f, args, msg in l:
1099 for _f, args, msg in l:
1076 repo.ui.note(b' %s -> %s\n' % (msg, m))
1100 repo.ui.note(b' %s -> %s\n' % (msg, m))
1077 # Pick random action. TODO: Instead, prompt user when resolving
1101 # Pick random action. TODO: Instead, prompt user when resolving
1078 m, l = list(bids.items())[0]
1102 m, l = list(bids.items())[0]
1079 repo.ui.warn(
1103 repo.ui.warn(
1080 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1104 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1081 )
1105 )
1082 actions[f] = l[0]
1106 mresult.addfile(f, *l[0])
1083 continue
1107 continue
1084 repo.ui.note(_(b'end of auction\n\n'))
1108 repo.ui.note(_(b'end of auction\n\n'))
1085 # TODO: think about commitinfo when bid merge is used
1109 # TODO: think about commitinfo when bid merge is used
1086 mresult = mergeresult()
1110 mresult.updatevalues(diverge, renamedelete, {})
1087 mresult.updatevalues(actions, diverge, renamedelete, {})
1088
1111
1089 if wctx.rev() is None:
1112 if wctx.rev() is None:
1090 fractions = _forgetremoved(wctx, mctx, branchmerge)
1113 fractions = _forgetremoved(wctx, mctx, branchmerge)
1091 mresult.actions.update(fractions)
1114 mresult.actions.update(fractions)
1092
1115
1093 prunedactions = sparse.filterupdatesactions(
1116 prunedactions = sparse.filterupdatesactions(
1094 repo, wctx, mctx, branchmerge, mresult.actions
1117 repo, wctx, mctx, branchmerge, mresult.actions
1095 )
1118 )
1096 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult.actions)
1119 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult.actions)
1097
1120
1098 mresult.setactions(prunedactions)
1121 mresult.setactions(prunedactions)
1099 return mresult
1122 return mresult
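# Illustrative sketch (inferred only from the call sites in this hunk, not
# from the mergeresult class definition itself): the bid auction above now
# records each decision through mergeresult.addfile() instead of assigning
# into a plain ``actions`` dict, e.g.
#
#   old:  actions[f] = (mergestatemod.ACTION_GET, (flags, False), b'msg')
#   new:  mresult.addfile(f, mergestatemod.ACTION_GET, (flags, False), b'msg')
#
# so addfile(filename, action, args, msg) is assumed to store the action
# keyed by filename, while diverge/renamedelete are attached separately via
# mresult.updatevalues(diverge, renamedelete, {}).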
1100
1123
1101
1124
1102 def _getcwd():
1125 def _getcwd():
1103 try:
1126 try:
1104 return encoding.getcwd()
1127 return encoding.getcwd()
1105 except OSError as err:
1128 except OSError as err:
1106 if err.errno == errno.ENOENT:
1129 if err.errno == errno.ENOENT:
1107 return None
1130 return None
1108 raise
1131 raise
1109
1132
1110
1133
1111 def batchremove(repo, wctx, actions):
1134 def batchremove(repo, wctx, actions):
1112 """apply removes to the working directory
1135 """apply removes to the working directory
1113
1136
1114 yields tuples for progress updates
1137 yields tuples for progress updates
1115 """
1138 """
1116 verbose = repo.ui.verbose
1139 verbose = repo.ui.verbose
1117 cwd = _getcwd()
1140 cwd = _getcwd()
1118 i = 0
1141 i = 0
1119 for f, args, msg in actions:
1142 for f, args, msg in actions:
1120 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1143 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1121 if verbose:
1144 if verbose:
1122 repo.ui.note(_(b"removing %s\n") % f)
1145 repo.ui.note(_(b"removing %s\n") % f)
1123 wctx[f].audit()
1146 wctx[f].audit()
1124 try:
1147 try:
1125 wctx[f].remove(ignoremissing=True)
1148 wctx[f].remove(ignoremissing=True)
1126 except OSError as inst:
1149 except OSError as inst:
1127 repo.ui.warn(
1150 repo.ui.warn(
1128 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1151 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1129 )
1152 )
1130 if i == 100:
1153 if i == 100:
1131 yield i, f
1154 yield i, f
1132 i = 0
1155 i = 0
1133 i += 1
1156 i += 1
1134 if i > 0:
1157 if i > 0:
1135 yield i, f
1158 yield i, f
1136
1159
1137 if cwd and not _getcwd():
1160 if cwd and not _getcwd():
1138 # cwd was removed in the course of removing files; print a helpful
1161 # cwd was removed in the course of removing files; print a helpful
1139 # warning.
1162 # warning.
1140 repo.ui.warn(
1163 repo.ui.warn(
1141 _(
1164 _(
1142 b"current directory was removed\n"
1165 b"current directory was removed\n"
1143 b"(consider changing to repo root: %s)\n"
1166 b"(consider changing to repo root: %s)\n"
1144 )
1167 )
1145 % repo.root
1168 % repo.root
1146 )
1169 )
1147
1170
1148
1171
1149 def batchget(repo, mctx, wctx, wantfiledata, actions):
1172 def batchget(repo, mctx, wctx, wantfiledata, actions):
1150 """apply gets to the working directory
1173 """apply gets to the working directory
1151
1174
1152 mctx is the context to get from
1175 mctx is the context to get from
1153
1176
1154 Yields arbitrarily many (False, tuple) for progress updates, followed by
1177 Yields arbitrarily many (False, tuple) for progress updates, followed by
1155 exactly one (True, filedata). When wantfiledata is false, filedata is an
1178 exactly one (True, filedata). When wantfiledata is false, filedata is an
1156 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1179 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1157 mtime) of the file f written for each action.
1180 mtime) of the file f written for each action.
1158 """
1181 """
1159 filedata = {}
1182 filedata = {}
1160 verbose = repo.ui.verbose
1183 verbose = repo.ui.verbose
1161 fctx = mctx.filectx
1184 fctx = mctx.filectx
1162 ui = repo.ui
1185 ui = repo.ui
1163 i = 0
1186 i = 0
1164 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1187 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1165 for f, (flags, backup), msg in actions:
1188 for f, (flags, backup), msg in actions:
1166 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1189 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1167 if verbose:
1190 if verbose:
1168 repo.ui.note(_(b"getting %s\n") % f)
1191 repo.ui.note(_(b"getting %s\n") % f)
1169
1192
1170 if backup:
1193 if backup:
1171 # If a file or directory exists with the same name, back that
1194 # If a file or directory exists with the same name, back that
1172 # up. Otherwise, look to see if there is a file that conflicts
1195 # up. Otherwise, look to see if there is a file that conflicts
1173 # with a directory this file is in, and if so, back that up.
1196 # with a directory this file is in, and if so, back that up.
1174 conflicting = f
1197 conflicting = f
1175 if not repo.wvfs.lexists(f):
1198 if not repo.wvfs.lexists(f):
1176 for p in pathutil.finddirs(f):
1199 for p in pathutil.finddirs(f):
1177 if repo.wvfs.isfileorlink(p):
1200 if repo.wvfs.isfileorlink(p):
1178 conflicting = p
1201 conflicting = p
1179 break
1202 break
1180 if repo.wvfs.lexists(conflicting):
1203 if repo.wvfs.lexists(conflicting):
1181 orig = scmutil.backuppath(ui, repo, conflicting)
1204 orig = scmutil.backuppath(ui, repo, conflicting)
1182 util.rename(repo.wjoin(conflicting), orig)
1205 util.rename(repo.wjoin(conflicting), orig)
1183 wfctx = wctx[f]
1206 wfctx = wctx[f]
1184 wfctx.clearunknown()
1207 wfctx.clearunknown()
1185 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1208 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1186 size = wfctx.write(
1209 size = wfctx.write(
1187 fctx(f).data(),
1210 fctx(f).data(),
1188 flags,
1211 flags,
1189 backgroundclose=True,
1212 backgroundclose=True,
1190 atomictemp=atomictemp,
1213 atomictemp=atomictemp,
1191 )
1214 )
1192 if wantfiledata:
1215 if wantfiledata:
1193 s = wfctx.lstat()
1216 s = wfctx.lstat()
1194 mode = s.st_mode
1217 mode = s.st_mode
1195 mtime = s[stat.ST_MTIME]
1218 mtime = s[stat.ST_MTIME]
1196 filedata[f] = (mode, size, mtime) # for dirstate.normal
1219 filedata[f] = (mode, size, mtime) # for dirstate.normal
1197 if i == 100:
1220 if i == 100:
1198 yield False, (i, f)
1221 yield False, (i, f)
1199 i = 0
1222 i = 0
1200 i += 1
1223 i += 1
1201 if i > 0:
1224 if i > 0:
1202 yield False, (i, f)
1225 yield False, (i, f)
1203 yield True, filedata
1226 yield True, filedata
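# A minimal consumption sketch for the protocol documented above; the real
# consumer is the worker loop inside applyupdates() further down, and the
# names used here are purely illustrative:
#
#   getfiledata = {}
#   for final, res in batchget(repo, mctx, wctx, wantfiledata, gets):
#       if final:
#           getfiledata = res         # the single trailing (True, filedata)
#       else:
#           i, item = res             # (count, filename) progress update
#           progress.increment(step=i, item=item)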
1204
1227
1205
1228
1206 def _prefetchfiles(repo, ctx, actions):
1229 def _prefetchfiles(repo, ctx, actions):
1207 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1230 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1208 of merge actions. ``ctx`` is the context being merged in."""
1231 of merge actions. ``ctx`` is the context being merged in."""
1209
1232
1210 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1233 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1211 # don't touch the context to be merged in. 'cd' is skipped, because
1234 # don't touch the context to be merged in. 'cd' is skipped, because
1212 # changed/deleted never resolves to something from the remote side.
1235 # changed/deleted never resolves to something from the remote side.
1213 oplist = [
1236 oplist = [
1214 actions[a]
1237 actions[a]
1215 for a in (
1238 for a in (
1216 mergestatemod.ACTION_GET,
1239 mergestatemod.ACTION_GET,
1217 mergestatemod.ACTION_DELETED_CHANGED,
1240 mergestatemod.ACTION_DELETED_CHANGED,
1218 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1241 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1219 mergestatemod.ACTION_MERGE,
1242 mergestatemod.ACTION_MERGE,
1220 )
1243 )
1221 ]
1244 ]
1222 prefetch = scmutil.prefetchfiles
1245 prefetch = scmutil.prefetchfiles
1223 matchfiles = scmutil.matchfiles
1246 matchfiles = scmutil.matchfiles
1224 prefetch(
1247 prefetch(
1225 repo,
1248 repo,
1226 [
1249 [
1227 (
1250 (
1228 ctx.rev(),
1251 ctx.rev(),
1229 matchfiles(
1252 matchfiles(
1230 repo, [f for sublist in oplist for f, args, msg in sublist]
1253 repo, [f for sublist in oplist for f, args, msg in sublist]
1231 ),
1254 ),
1232 )
1255 )
1233 ],
1256 ],
1234 )
1257 )
1235
1258
1236
1259
1237 @attr.s(frozen=True)
1260 @attr.s(frozen=True)
1238 class updateresult(object):
1261 class updateresult(object):
1239 updatedcount = attr.ib()
1262 updatedcount = attr.ib()
1240 mergedcount = attr.ib()
1263 mergedcount = attr.ib()
1241 removedcount = attr.ib()
1264 removedcount = attr.ib()
1242 unresolvedcount = attr.ib()
1265 unresolvedcount = attr.ib()
1243
1266
1244 def isempty(self):
1267 def isempty(self):
1245 return not (
1268 return not (
1246 self.updatedcount
1269 self.updatedcount
1247 or self.mergedcount
1270 or self.mergedcount
1248 or self.removedcount
1271 or self.removedcount
1249 or self.unresolvedcount
1272 or self.unresolvedcount
1250 )
1273 )
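# Semantics sketch (follows directly from the attrs above, shown only for
# clarity): updateresult(0, 0, 0, 0).isempty() is True, while any non-zero
# count, for example updateresult(1, 0, 0, 0), makes isempty() return False.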
1251
1274
1252
1275
1253 def emptyactions():
1276 def emptyactions():
1254 """create an actions dict, to be populated and passed to applyupdates()"""
1277 """create an actions dict, to be populated and passed to applyupdates()"""
1255 return {
1278 return {
1256 m: []
1279 m: []
1257 for m in (
1280 for m in (
1258 mergestatemod.ACTION_ADD,
1281 mergestatemod.ACTION_ADD,
1259 mergestatemod.ACTION_ADD_MODIFIED,
1282 mergestatemod.ACTION_ADD_MODIFIED,
1260 mergestatemod.ACTION_FORGET,
1283 mergestatemod.ACTION_FORGET,
1261 mergestatemod.ACTION_GET,
1284 mergestatemod.ACTION_GET,
1262 mergestatemod.ACTION_CHANGED_DELETED,
1285 mergestatemod.ACTION_CHANGED_DELETED,
1263 mergestatemod.ACTION_DELETED_CHANGED,
1286 mergestatemod.ACTION_DELETED_CHANGED,
1264 mergestatemod.ACTION_REMOVE,
1287 mergestatemod.ACTION_REMOVE,
1265 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1288 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1266 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1289 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1267 mergestatemod.ACTION_MERGE,
1290 mergestatemod.ACTION_MERGE,
1268 mergestatemod.ACTION_EXEC,
1291 mergestatemod.ACTION_EXEC,
1269 mergestatemod.ACTION_KEEP,
1292 mergestatemod.ACTION_KEEP,
1270 mergestatemod.ACTION_PATH_CONFLICT,
1293 mergestatemod.ACTION_PATH_CONFLICT,
1271 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
1294 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
1272 )
1295 )
1273 }
1296 }
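# Hedged usage sketch: callers fill these per-action lists with
# (filename, args, message) tuples and hand the dict to applyupdates();
# the file name and args below are invented for illustration only:
#
#   actions = emptyactions()
#   actions[mergestatemod.ACTION_GET].append((b'a.txt', (b'', False), b'get'))
#   applyupdates(repo, actions, wctx, mctx, overwrite=False,
#                wantfiledata=True)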
1274
1297
1275
1298
1276 def applyupdates(
1299 def applyupdates(
1277 repo,
1300 repo,
1278 actions,
1301 actions,
1279 wctx,
1302 wctx,
1280 mctx,
1303 mctx,
1281 overwrite,
1304 overwrite,
1282 wantfiledata,
1305 wantfiledata,
1283 labels=None,
1306 labels=None,
1284 commitinfo=None,
1307 commitinfo=None,
1285 ):
1308 ):
1286 """apply the merge action list to the working directory
1309 """apply the merge action list to the working directory
1287
1310
1288 wctx is the working copy context
1311 wctx is the working copy context
1289 mctx is the context to be merged into the working copy
1312 mctx is the context to be merged into the working copy
1290 commitinfo is a mapping of information which needs to be stored somewhere
1313 commitinfo is a mapping of information which needs to be stored somewhere
1291 (probably mergestate) so that it can be used at commit time.
1314 (probably mergestate) so that it can be used at commit time.
1292
1315
1293 Return a tuple of (counts, filedata), where counts is a tuple
1316 Return a tuple of (counts, filedata), where counts is a tuple
1294 (updated, merged, removed, unresolved) that describes how many
1317 (updated, merged, removed, unresolved) that describes how many
1295 files were affected by the update, and filedata is as described in
1318 files were affected by the update, and filedata is as described in
1296 batchget.
1319 batchget.
1297 """
1320 """
1298
1321
1299 _prefetchfiles(repo, mctx, actions)
1322 _prefetchfiles(repo, mctx, actions)
1300
1323
1301 updated, merged, removed = 0, 0, 0
1324 updated, merged, removed = 0, 0, 0
1302 ms = mergestatemod.mergestate.clean(
1325 ms = mergestatemod.mergestate.clean(
1303 repo, wctx.p1().node(), mctx.node(), labels
1326 repo, wctx.p1().node(), mctx.node(), labels
1304 )
1327 )
1305
1328
1306 if commitinfo is None:
1329 if commitinfo is None:
1307 commitinfo = {}
1330 commitinfo = {}
1308
1331
1309 for f, op in pycompat.iteritems(commitinfo):
1332 for f, op in pycompat.iteritems(commitinfo):
1310 # the other side of filenode was chosen while merging, store this in
1333 # the other side of filenode was chosen while merging, store this in
1311 # mergestate so that it can be reused on commit
1334 # mergestate so that it can be reused on commit
1312 if op == b'other':
1335 if op == b'other':
1313 ms.addmergedother(f)
1336 ms.addmergedother(f)
1314
1337
1315 moves = []
1338 moves = []
1316 for m, l in actions.items():
1339 for m, l in actions.items():
1317 l.sort()
1340 l.sort()
1318
1341
1319 # 'cd' and 'dc' actions are treated like other merge conflicts
1342 # 'cd' and 'dc' actions are treated like other merge conflicts
1320 mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED])
1343 mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED])
1321 mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED]))
1344 mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED]))
1322 mergeactions.extend(actions[mergestatemod.ACTION_MERGE])
1345 mergeactions.extend(actions[mergestatemod.ACTION_MERGE])
1323 for f, args, msg in mergeactions:
1346 for f, args, msg in mergeactions:
1324 f1, f2, fa, move, anc = args
1347 f1, f2, fa, move, anc = args
1325 if f == b'.hgsubstate': # merged internally
1348 if f == b'.hgsubstate': # merged internally
1326 continue
1349 continue
1327 if f1 is None:
1350 if f1 is None:
1328 fcl = filemerge.absentfilectx(wctx, fa)
1351 fcl = filemerge.absentfilectx(wctx, fa)
1329 else:
1352 else:
1330 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1353 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1331 fcl = wctx[f1]
1354 fcl = wctx[f1]
1332 if f2 is None:
1355 if f2 is None:
1333 fco = filemerge.absentfilectx(mctx, fa)
1356 fco = filemerge.absentfilectx(mctx, fa)
1334 else:
1357 else:
1335 fco = mctx[f2]
1358 fco = mctx[f2]
1336 actx = repo[anc]
1359 actx = repo[anc]
1337 if fa in actx:
1360 if fa in actx:
1338 fca = actx[fa]
1361 fca = actx[fa]
1339 else:
1362 else:
1340 # TODO: move to absentfilectx
1363 # TODO: move to absentfilectx
1341 fca = repo.filectx(f1, fileid=nullrev)
1364 fca = repo.filectx(f1, fileid=nullrev)
1342 ms.add(fcl, fco, fca, f)
1365 ms.add(fcl, fco, fca, f)
1343 if f1 != f and move:
1366 if f1 != f and move:
1344 moves.append(f1)
1367 moves.append(f1)
1345
1368
1346 # remove renamed files after safely stored
1369 # remove renamed files after safely stored
1347 for f in moves:
1370 for f in moves:
1348 if wctx[f].lexists():
1371 if wctx[f].lexists():
1349 repo.ui.debug(b"removing %s\n" % f)
1372 repo.ui.debug(b"removing %s\n" % f)
1350 wctx[f].audit()
1373 wctx[f].audit()
1351 wctx[f].remove()
1374 wctx[f].remove()
1352
1375
1353 numupdates = sum(
1376 numupdates = sum(
1354 len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP
1377 len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP
1355 )
1378 )
1356 progress = repo.ui.makeprogress(
1379 progress = repo.ui.makeprogress(
1357 _(b'updating'), unit=_(b'files'), total=numupdates
1380 _(b'updating'), unit=_(b'files'), total=numupdates
1358 )
1381 )
1359
1382
1360 if [
1383 if [
1361 a
1384 a
1362 for a in actions[mergestatemod.ACTION_REMOVE]
1385 for a in actions[mergestatemod.ACTION_REMOVE]
1363 if a[0] == b'.hgsubstate'
1386 if a[0] == b'.hgsubstate'
1364 ]:
1387 ]:
1365 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1388 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1366
1389
1367 # record path conflicts
1390 # record path conflicts
1368 for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]:
1391 for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]:
1369 f1, fo = args
1392 f1, fo = args
1370 s = repo.ui.status
1393 s = repo.ui.status
1371 s(
1394 s(
1372 _(
1395 _(
1373 b"%s: path conflict - a file or link has the same name as a "
1396 b"%s: path conflict - a file or link has the same name as a "
1374 b"directory\n"
1397 b"directory\n"
1375 )
1398 )
1376 % f
1399 % f
1377 )
1400 )
1378 if fo == b'l':
1401 if fo == b'l':
1379 s(_(b"the local file has been renamed to %s\n") % f1)
1402 s(_(b"the local file has been renamed to %s\n") % f1)
1380 else:
1403 else:
1381 s(_(b"the remote file has been renamed to %s\n") % f1)
1404 s(_(b"the remote file has been renamed to %s\n") % f1)
1382 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1405 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1383 ms.addpathconflict(f, f1, fo)
1406 ms.addpathconflict(f, f1, fo)
1384 progress.increment(item=f)
1407 progress.increment(item=f)
1385
1408
1386 # When merging in-memory, we can't support worker processes, so set the
1409 # When merging in-memory, we can't support worker processes, so set the
1387 # per-item cost at 0 in that case.
1410 # per-item cost at 0 in that case.
1388 cost = 0 if wctx.isinmemory() else 0.001
1411 cost = 0 if wctx.isinmemory() else 0.001
1389
1412
1390 # remove in parallel (must come before resolving path conflicts and getting)
1413 # remove in parallel (must come before resolving path conflicts and getting)
1391 prog = worker.worker(
1414 prog = worker.worker(
1392 repo.ui,
1415 repo.ui,
1393 cost,
1416 cost,
1394 batchremove,
1417 batchremove,
1395 (repo, wctx),
1418 (repo, wctx),
1396 actions[mergestatemod.ACTION_REMOVE],
1419 actions[mergestatemod.ACTION_REMOVE],
1397 )
1420 )
1398 for i, item in prog:
1421 for i, item in prog:
1399 progress.increment(step=i, item=item)
1422 progress.increment(step=i, item=item)
1400 removed = len(actions[mergestatemod.ACTION_REMOVE])
1423 removed = len(actions[mergestatemod.ACTION_REMOVE])
1401
1424
1402 # resolve path conflicts (must come before getting)
1425 # resolve path conflicts (must come before getting)
1403 for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]:
1426 for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]:
1404 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1427 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1405 (f0, origf0) = args
1428 (f0, origf0) = args
1406 if wctx[f0].lexists():
1429 if wctx[f0].lexists():
1407 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1430 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1408 wctx[f].audit()
1431 wctx[f].audit()
1409 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1432 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1410 wctx[f0].remove()
1433 wctx[f0].remove()
1411 progress.increment(item=f)
1434 progress.increment(item=f)
1412
1435
1413 # get in parallel.
1436 # get in parallel.
1414 threadsafe = repo.ui.configbool(
1437 threadsafe = repo.ui.configbool(
1415 b'experimental', b'worker.wdir-get-thread-safe'
1438 b'experimental', b'worker.wdir-get-thread-safe'
1416 )
1439 )
1417 prog = worker.worker(
1440 prog = worker.worker(
1418 repo.ui,
1441 repo.ui,
1419 cost,
1442 cost,
1420 batchget,
1443 batchget,
1421 (repo, mctx, wctx, wantfiledata),
1444 (repo, mctx, wctx, wantfiledata),
1422 actions[mergestatemod.ACTION_GET],
1445 actions[mergestatemod.ACTION_GET],
1423 threadsafe=threadsafe,
1446 threadsafe=threadsafe,
1424 hasretval=True,
1447 hasretval=True,
1425 )
1448 )
1426 getfiledata = {}
1449 getfiledata = {}
1427 for final, res in prog:
1450 for final, res in prog:
1428 if final:
1451 if final:
1429 getfiledata = res
1452 getfiledata = res
1430 else:
1453 else:
1431 i, item = res
1454 i, item = res
1432 progress.increment(step=i, item=item)
1455 progress.increment(step=i, item=item)
1433 updated = len(actions[mergestatemod.ACTION_GET])
1456 updated = len(actions[mergestatemod.ACTION_GET])
1434
1457
1435 if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']:
1458 if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']:
1436 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1459 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1437
1460
1438 # forget (manifest only, just log it) (must come first)
1461 # forget (manifest only, just log it) (must come first)
1439 for f, args, msg in actions[mergestatemod.ACTION_FORGET]:
1462 for f, args, msg in actions[mergestatemod.ACTION_FORGET]:
1440 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1463 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1441 progress.increment(item=f)
1464 progress.increment(item=f)
1442
1465
1443 # re-add (manifest only, just log it)
1466 # re-add (manifest only, just log it)
1444 for f, args, msg in actions[mergestatemod.ACTION_ADD]:
1467 for f, args, msg in actions[mergestatemod.ACTION_ADD]:
1445 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1468 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1446 progress.increment(item=f)
1469 progress.increment(item=f)
1447
1470
1448 # re-add/mark as modified (manifest only, just log it)
1471 # re-add/mark as modified (manifest only, just log it)
1449 for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]:
1472 for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]:
1450 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1473 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1451 progress.increment(item=f)
1474 progress.increment(item=f)
1452
1475
1453 # keep (noop, just log it)
1476 # keep (noop, just log it)
1454 for f, args, msg in actions[mergestatemod.ACTION_KEEP]:
1477 for f, args, msg in actions[mergestatemod.ACTION_KEEP]:
1455 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1478 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1456 # no progress
1479 # no progress
1457
1480
1458 # directory rename, move local
1481 # directory rename, move local
1459 for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
1482 for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
1460 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1483 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1461 progress.increment(item=f)
1484 progress.increment(item=f)
1462 f0, flags = args
1485 f0, flags = args
1463 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1486 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1464 wctx[f].audit()
1487 wctx[f].audit()
1465 wctx[f].write(wctx.filectx(f0).data(), flags)
1488 wctx[f].write(wctx.filectx(f0).data(), flags)
1466 wctx[f0].remove()
1489 wctx[f0].remove()
1467 updated += 1
1490 updated += 1
1468
1491
1469 # local directory rename, get
1492 # local directory rename, get
1470 for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
1493 for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
1471 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1494 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1472 progress.increment(item=f)
1495 progress.increment(item=f)
1473 f0, flags = args
1496 f0, flags = args
1474 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1497 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1475 wctx[f].write(mctx.filectx(f0).data(), flags)
1498 wctx[f].write(mctx.filectx(f0).data(), flags)
1476 updated += 1
1499 updated += 1
1477
1500
1478 # exec
1501 # exec
1479 for f, args, msg in actions[mergestatemod.ACTION_EXEC]:
1502 for f, args, msg in actions[mergestatemod.ACTION_EXEC]:
1480 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1503 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1481 progress.increment(item=f)
1504 progress.increment(item=f)
1482 (flags,) = args
1505 (flags,) = args
1483 wctx[f].audit()
1506 wctx[f].audit()
1484 wctx[f].setflags(b'l' in flags, b'x' in flags)
1507 wctx[f].setflags(b'l' in flags, b'x' in flags)
1485 updated += 1
1508 updated += 1
1486
1509
1487 # the ordering is important here -- ms.mergedriver will raise if the merge
1510 # the ordering is important here -- ms.mergedriver will raise if the merge
1488 # driver has changed, and we want to be able to bypass it when overwrite is
1511 # driver has changed, and we want to be able to bypass it when overwrite is
1489 # True
1512 # True
1490 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1513 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1491
1514
1492 if usemergedriver:
1515 if usemergedriver:
1493 if wctx.isinmemory():
1516 if wctx.isinmemory():
1494 raise error.InMemoryMergeConflictsError(
1517 raise error.InMemoryMergeConflictsError(
1495 b"in-memory merge does not support mergedriver"
1518 b"in-memory merge does not support mergedriver"
1496 )
1519 )
1497 ms.commit()
1520 ms.commit()
1498 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1521 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1499 # the driver might leave some files unresolved
1522 # the driver might leave some files unresolved
1500 unresolvedf = set(ms.unresolved())
1523 unresolvedf = set(ms.unresolved())
1501 if not proceed:
1524 if not proceed:
1502 # XXX setting unresolved to at least 1 is a hack to make sure we
1525 # XXX setting unresolved to at least 1 is a hack to make sure we
1503 # error out
1526 # error out
1504 return updateresult(
1527 return updateresult(
1505 updated, merged, removed, max(len(unresolvedf), 1)
1528 updated, merged, removed, max(len(unresolvedf), 1)
1506 )
1529 )
1507 newactions = []
1530 newactions = []
1508 for f, args, msg in mergeactions:
1531 for f, args, msg in mergeactions:
1509 if f in unresolvedf:
1532 if f in unresolvedf:
1510 newactions.append((f, args, msg))
1533 newactions.append((f, args, msg))
1511 mergeactions = newactions
1534 mergeactions = newactions
1512
1535
1513 try:
1536 try:
1514 # premerge
1537 # premerge
1515 tocomplete = []
1538 tocomplete = []
1516 for f, args, msg in mergeactions:
1539 for f, args, msg in mergeactions:
1517 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
1540 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
1518 progress.increment(item=f)
1541 progress.increment(item=f)
1519 if f == b'.hgsubstate': # subrepo states need updating
1542 if f == b'.hgsubstate': # subrepo states need updating
1520 subrepoutil.submerge(
1543 subrepoutil.submerge(
1521 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1544 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1522 )
1545 )
1523 continue
1546 continue
1524 wctx[f].audit()
1547 wctx[f].audit()
1525 complete, r = ms.preresolve(f, wctx)
1548 complete, r = ms.preresolve(f, wctx)
1526 if not complete:
1549 if not complete:
1527 numupdates += 1
1550 numupdates += 1
1528 tocomplete.append((f, args, msg))
1551 tocomplete.append((f, args, msg))
1529
1552
1530 # merge
1553 # merge
1531 for f, args, msg in tocomplete:
1554 for f, args, msg in tocomplete:
1532 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
1555 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
1533 progress.increment(item=f, total=numupdates)
1556 progress.increment(item=f, total=numupdates)
1534 ms.resolve(f, wctx)
1557 ms.resolve(f, wctx)
1535
1558
1536 finally:
1559 finally:
1537 ms.commit()
1560 ms.commit()
1538
1561
1539 unresolved = ms.unresolvedcount()
1562 unresolved = ms.unresolvedcount()
1540
1563
1541 if (
1564 if (
1542 usemergedriver
1565 usemergedriver
1543 and not unresolved
1566 and not unresolved
1544 and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
1567 and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
1545 ):
1568 ):
1546 if not driverconclude(repo, ms, wctx, labels=labels):
1569 if not driverconclude(repo, ms, wctx, labels=labels):
1547 # XXX setting unresolved to at least 1 is a hack to make sure we
1570 # XXX setting unresolved to at least 1 is a hack to make sure we
1548 # error out
1571 # error out
1549 unresolved = max(unresolved, 1)
1572 unresolved = max(unresolved, 1)
1550
1573
1551 ms.commit()
1574 ms.commit()
1552
1575
1553 msupdated, msmerged, msremoved = ms.counts()
1576 msupdated, msmerged, msremoved = ms.counts()
1554 updated += msupdated
1577 updated += msupdated
1555 merged += msmerged
1578 merged += msmerged
1556 removed += msremoved
1579 removed += msremoved
1557
1580
1558 extraactions = ms.actions()
1581 extraactions = ms.actions()
1559 if extraactions:
1582 if extraactions:
1560 mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]}
1583 mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]}
1561 for k, acts in pycompat.iteritems(extraactions):
1584 for k, acts in pycompat.iteritems(extraactions):
1562 actions[k].extend(acts)
1585 actions[k].extend(acts)
1563 if k == mergestatemod.ACTION_GET and wantfiledata:
1586 if k == mergestatemod.ACTION_GET and wantfiledata:
1564 # no filedata until mergestate is updated to provide it
1587 # no filedata until mergestate is updated to provide it
1565 for a in acts:
1588 for a in acts:
1566 getfiledata[a[0]] = None
1589 getfiledata[a[0]] = None
1567 # Remove these files from actions[ACTION_MERGE] as well. This is
1590 # Remove these files from actions[ACTION_MERGE] as well. This is
1568 # important because in recordupdates, files in actions[ACTION_MERGE]
1591 # important because in recordupdates, files in actions[ACTION_MERGE]
1569 # are processed after files in other actions, and the merge driver
1592 # are processed after files in other actions, and the merge driver
1570 # might add files to those actions via extraactions above. This can
1593 # might add files to those actions via extraactions above. This can
1571 # lead to a file being recorded twice, with poor results. This is
1594 # lead to a file being recorded twice, with poor results. This is
1572 # especially problematic for actions[ACTION_REMOVE] (currently only
1595 # especially problematic for actions[ACTION_REMOVE] (currently only
1573 # possible with the merge driver in the initial merge process;
1596 # possible with the merge driver in the initial merge process;
1574 # interrupted merges don't go through this flow).
1597 # interrupted merges don't go through this flow).
1575 #
1598 #
1576 # The real fix here is to have indexes by both file and action so
1599 # The real fix here is to have indexes by both file and action so
1577 # that when the action for a file is changed it is automatically
1600 # that when the action for a file is changed it is automatically
1578 # reflected in the other action lists. But that involves a more
1601 # reflected in the other action lists. But that involves a more
1579 # complex data structure, so this will do for now.
1602 # complex data structure, so this will do for now.
1580 #
1603 #
1581 # We don't need to do the same operation for 'dc' and 'cd' because
1604 # We don't need to do the same operation for 'dc' and 'cd' because
1582 # those lists aren't consulted again.
1605 # those lists aren't consulted again.
1583 mfiles.difference_update(a[0] for a in acts)
1606 mfiles.difference_update(a[0] for a in acts)
1584
1607
1585 actions[mergestatemod.ACTION_MERGE] = [
1608 actions[mergestatemod.ACTION_MERGE] = [
1586 a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles
1609 a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles
1587 ]
1610 ]
1588
1611
1589 progress.complete()
1612 progress.complete()
1590 assert len(getfiledata) == (
1613 assert len(getfiledata) == (
1591 len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0
1614 len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0
1592 )
1615 )
1593 return updateresult(updated, merged, removed, unresolved), getfiledata
1616 return updateresult(updated, merged, removed, unresolved), getfiledata
1594
1617
1595
1618
1596 def _advertisefsmonitor(repo, num_gets, p1node):
1619 def _advertisefsmonitor(repo, num_gets, p1node):
1597 # Advertise fsmonitor when its presence could be useful.
1620 # Advertise fsmonitor when its presence could be useful.
1598 #
1621 #
1599 # We only advertise when performing an update from an empty working
1622 # We only advertise when performing an update from an empty working
1600 # directory. This typically only occurs during initial clone.
1623 # directory. This typically only occurs during initial clone.
1601 #
1624 #
1602 # We give users a mechanism to disable the warning in case it is
1625 # We give users a mechanism to disable the warning in case it is
1603 # annoying.
1626 # annoying.
1604 #
1627 #
1605 # We only advertise on Linux and MacOS because that's where fsmonitor is
1628 # We only advertise on Linux and MacOS because that's where fsmonitor is
1606 # considered stable.
1629 # considered stable.
1607 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1630 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1608 fsmonitorthreshold = repo.ui.configint(
1631 fsmonitorthreshold = repo.ui.configint(
1609 b'fsmonitor', b'warn_update_file_count'
1632 b'fsmonitor', b'warn_update_file_count'
1610 )
1633 )
1611 try:
1634 try:
1612 # avoid cycle: extensions -> cmdutil -> merge
1635 # avoid cycle: extensions -> cmdutil -> merge
1613 from . import extensions
1636 from . import extensions
1614
1637
1615 extensions.find(b'fsmonitor')
1638 extensions.find(b'fsmonitor')
1616 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1639 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1617 # We intentionally don't look at whether fsmonitor has disabled
1640 # We intentionally don't look at whether fsmonitor has disabled
1618 # itself because a) fsmonitor may have already printed a warning
1641 # itself because a) fsmonitor may have already printed a warning
1619 # b) we only care about the config state here.
1642 # b) we only care about the config state here.
1620 except KeyError:
1643 except KeyError:
1621 fsmonitorenabled = False
1644 fsmonitorenabled = False
1622
1645
1623 if (
1646 if (
1624 fsmonitorwarning
1647 fsmonitorwarning
1625 and not fsmonitorenabled
1648 and not fsmonitorenabled
1626 and p1node == nullid
1649 and p1node == nullid
1627 and num_gets >= fsmonitorthreshold
1650 and num_gets >= fsmonitorthreshold
1628 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1651 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1629 ):
1652 ):
1630 repo.ui.warn(
1653 repo.ui.warn(
1631 _(
1654 _(
1632 b'(warning: large working directory being used without '
1655 b'(warning: large working directory being used without '
1633 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1656 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1634 b'see "hg help -e fsmonitor")\n'
1657 b'see "hg help -e fsmonitor")\n'
1635 )
1658 )
1636 )
1659 )
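# The knobs read above correspond to user configuration roughly as follows
# (the values are invented examples, not defaults taken from this file):
#
#   [fsmonitor]
#   warn_when_unused = False         # silence this advertisement entirely
#   warn_update_file_count = 50000   # only warn for very large checkouts
#
# Enabling the fsmonitor extension itself (mode not set to off) also
# suppresses the warning, since it only fires when fsmonitor is unused.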
1637
1660
1638
1661
1639 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1662 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1640 UPDATECHECK_NONE = b'none'
1663 UPDATECHECK_NONE = b'none'
1641 UPDATECHECK_LINEAR = b'linear'
1664 UPDATECHECK_LINEAR = b'linear'
1642 UPDATECHECK_NO_CONFLICT = b'noconflict'
1665 UPDATECHECK_NO_CONFLICT = b'noconflict'
1643
1666
1644
1667
1645 def update(
1668 def update(
1646 repo,
1669 repo,
1647 node,
1670 node,
1648 branchmerge,
1671 branchmerge,
1649 force,
1672 force,
1650 ancestor=None,
1673 ancestor=None,
1651 mergeancestor=False,
1674 mergeancestor=False,
1652 labels=None,
1675 labels=None,
1653 matcher=None,
1676 matcher=None,
1654 mergeforce=False,
1677 mergeforce=False,
1655 updatedirstate=True,
1678 updatedirstate=True,
1656 updatecheck=None,
1679 updatecheck=None,
1657 wc=None,
1680 wc=None,
1658 ):
1681 ):
1659 """
1682 """
1660 Perform a merge between the working directory and the given node
1683 Perform a merge between the working directory and the given node
1661
1684
1662 node = the node to update to
1685 node = the node to update to
1663 branchmerge = whether to merge between branches
1686 branchmerge = whether to merge between branches
1664 force = whether to force branch merging or file overwriting
1687 force = whether to force branch merging or file overwriting
1665 matcher = a matcher to filter file lists (dirstate not updated)
1688 matcher = a matcher to filter file lists (dirstate not updated)
1666 mergeancestor = whether it is merging with an ancestor. If true,
1689 mergeancestor = whether it is merging with an ancestor. If true,
1667 we should accept the incoming changes for any prompts that occur.
1690 we should accept the incoming changes for any prompts that occur.
1668 If false, merging with an ancestor (fast-forward) is only allowed
1691 If false, merging with an ancestor (fast-forward) is only allowed
1669 between different named branches. This flag is used by the rebase extension
1692 between different named branches. This flag is used by the rebase extension
1670 as a temporary fix and should be avoided in general.
1693 as a temporary fix and should be avoided in general.
1671 labels = labels to use for base, local and other
1694 labels = labels to use for base, local and other
1672 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1695 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1673 this is True, then 'force' should be True as well.
1696 this is True, then 'force' should be True as well.
1674
1697
1675 The table below shows all the behaviors of the update command given the
1698 The table below shows all the behaviors of the update command given the
1676 -c/--check and -C/--clean or no options, whether the working directory is
1699 -c/--check and -C/--clean or no options, whether the working directory is
1677 dirty, whether a revision is specified, and the relationship of the parent
1700 dirty, whether a revision is specified, and the relationship of the parent
1678 rev to the target rev (linear or not). Match from top first. The -n
1701 rev to the target rev (linear or not). Match from top first. The -n
1679 option doesn't exist on the command line, but represents the
1702 option doesn't exist on the command line, but represents the
1680 experimental.updatecheck=noconflict option.
1703 experimental.updatecheck=noconflict option.
1681
1704
1682 This logic is tested by test-update-branches.t.
1705 This logic is tested by test-update-branches.t.
1683
1706
1684 -c -C -n -m dirty rev linear | result
1707 -c -C -n -m dirty rev linear | result
1685 y y * * * * * | (1)
1708 y y * * * * * | (1)
1686 y * y * * * * | (1)
1709 y * y * * * * | (1)
1687 y * * y * * * | (1)
1710 y * * y * * * | (1)
1688 * y y * * * * | (1)
1711 * y y * * * * | (1)
1689 * y * y * * * | (1)
1712 * y * y * * * | (1)
1690 * * y y * * * | (1)
1713 * * y y * * * | (1)
1691 * * * * * n n | x
1714 * * * * * n n | x
1692 * * * * n * * | ok
1715 * * * * n * * | ok
1693 n n n n y * y | merge
1716 n n n n y * y | merge
1694 n n n n y y n | (2)
1717 n n n n y y n | (2)
1695 n n n y y * * | merge
1718 n n n y y * * | merge
1696 n n y n y * * | merge if no conflict
1719 n n y n y * * | merge if no conflict
1697 n y n n y * * | discard
1720 n y n n y * * | discard
1698 y n n n y * * | (3)
1721 y n n n y * * | (3)
1699
1722
1700 x = can't happen
1723 x = can't happen
1701 * = don't-care
1724 * = don't-care
1702 1 = incompatible options (checked in commands.py)
1725 1 = incompatible options (checked in commands.py)
1703 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1726 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1704 3 = abort: uncommitted changes (checked in commands.py)
1727 3 = abort: uncommitted changes (checked in commands.py)
1705
1728
1706 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1729 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1707 to repo[None] if None is passed.
1730 to repo[None] if None is passed.
1708
1731
1709 Return the same tuple as applyupdates().
1732 Return the same tuple as applyupdates().
1710 """
1733 """
1711 # Avoid cycle.
1734 # Avoid cycle.
1712 from . import sparse
1735 from . import sparse
1713
1736
1714 # This function used to find the default destination if node was None, but
1737 # This function used to find the default destination if node was None, but
1715 # that's now in destutil.py.
1738 # that's now in destutil.py.
1716 assert node is not None
1739 assert node is not None
1717 if not branchmerge and not force:
1740 if not branchmerge and not force:
1718 # TODO: remove the default once all callers that pass branchmerge=False
1741 # TODO: remove the default once all callers that pass branchmerge=False
1719 # and force=False pass a value for updatecheck. We may want to allow
1742 # and force=False pass a value for updatecheck. We may want to allow
1720 # updatecheck='abort' to better support some of these callers.
1743 # updatecheck='abort' to better support some of these callers.
1721 if updatecheck is None:
1744 if updatecheck is None:
1722 updatecheck = UPDATECHECK_LINEAR
1745 updatecheck = UPDATECHECK_LINEAR
1723 if updatecheck not in (
1746 if updatecheck not in (
1724 UPDATECHECK_NONE,
1747 UPDATECHECK_NONE,
1725 UPDATECHECK_LINEAR,
1748 UPDATECHECK_LINEAR,
1726 UPDATECHECK_NO_CONFLICT,
1749 UPDATECHECK_NO_CONFLICT,
1727 ):
1750 ):
1728 raise ValueError(
1751 raise ValueError(
1729 r'Invalid updatecheck %r (can accept %r)'
1752 r'Invalid updatecheck %r (can accept %r)'
1730 % (
1753 % (
1731 updatecheck,
1754 updatecheck,
1732 (
1755 (
1733 UPDATECHECK_NONE,
1756 UPDATECHECK_NONE,
1734 UPDATECHECK_LINEAR,
1757 UPDATECHECK_LINEAR,
1735 UPDATECHECK_NO_CONFLICT,
1758 UPDATECHECK_NO_CONFLICT,
1736 ),
1759 ),
1737 )
1760 )
1738 )
1761 )
1739 if wc is not None and wc.isinmemory():
1762 if wc is not None and wc.isinmemory():
1740 maybe_wlock = util.nullcontextmanager()
1763 maybe_wlock = util.nullcontextmanager()
1741 else:
1764 else:
1742 maybe_wlock = repo.wlock()
1765 maybe_wlock = repo.wlock()
1743 with maybe_wlock:
1766 with maybe_wlock:
1744 if wc is None:
1767 if wc is None:
1745 wc = repo[None]
1768 wc = repo[None]
1746 pl = wc.parents()
1769 pl = wc.parents()
1747 p1 = pl[0]
1770 p1 = pl[0]
1748 p2 = repo[node]
1771 p2 = repo[node]
1749 if ancestor is not None:
1772 if ancestor is not None:
1750 pas = [repo[ancestor]]
1773 pas = [repo[ancestor]]
1751 else:
1774 else:
1752 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1775 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1753 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1776 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1754 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1777 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1755 else:
1778 else:
1756 pas = [p1.ancestor(p2, warn=branchmerge)]
1779 pas = [p1.ancestor(p2, warn=branchmerge)]
1757
1780
1758 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1781 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1759
1782
1760 overwrite = force and not branchmerge
1783 overwrite = force and not branchmerge
1761 ### check phase
1784 ### check phase
1762 if not overwrite:
1785 if not overwrite:
1763 if len(pl) > 1:
1786 if len(pl) > 1:
1764 raise error.Abort(_(b"outstanding uncommitted merge"))
1787 raise error.Abort(_(b"outstanding uncommitted merge"))
1765 ms = mergestatemod.mergestate.read(repo)
1788 ms = mergestatemod.mergestate.read(repo)
1766 if list(ms.unresolved()):
1789 if list(ms.unresolved()):
1767 raise error.Abort(
1790 raise error.Abort(
1768 _(b"outstanding merge conflicts"),
1791 _(b"outstanding merge conflicts"),
1769 hint=_(b"use 'hg resolve' to resolve"),
1792 hint=_(b"use 'hg resolve' to resolve"),
1770 )
1793 )
1771 if branchmerge:
1794 if branchmerge:
1772 if pas == [p2]:
1795 if pas == [p2]:
1773 raise error.Abort(
1796 raise error.Abort(
1774 _(
1797 _(
1775 b"merging with a working directory ancestor"
1798 b"merging with a working directory ancestor"
1776 b" has no effect"
1799 b" has no effect"
1777 )
1800 )
1778 )
1801 )
1779 elif pas == [p1]:
1802 elif pas == [p1]:
1780 if not mergeancestor and wc.branch() == p2.branch():
1803 if not mergeancestor and wc.branch() == p2.branch():
1781 raise error.Abort(
1804 raise error.Abort(
1782 _(b"nothing to merge"),
1805 _(b"nothing to merge"),
1783 hint=_(b"use 'hg update' or check 'hg heads'"),
1806 hint=_(b"use 'hg update' or check 'hg heads'"),
1784 )
1807 )
1785 if not force and (wc.files() or wc.deleted()):
1808 if not force and (wc.files() or wc.deleted()):
1786 raise error.Abort(
1809 raise error.Abort(
1787 _(b"uncommitted changes"),
1810 _(b"uncommitted changes"),
1788 hint=_(b"use 'hg status' to list changes"),
1811 hint=_(b"use 'hg status' to list changes"),
1789 )
1812 )
1790 if not wc.isinmemory():
1813 if not wc.isinmemory():
1791 for s in sorted(wc.substate):
1814 for s in sorted(wc.substate):
1792 wc.sub(s).bailifchanged()
1815 wc.sub(s).bailifchanged()
1793
1816
1794 elif not overwrite:
1817 elif not overwrite:
1795 if p1 == p2: # no-op update
1818 if p1 == p2: # no-op update
1796 # call the hooks and exit early
1819 # call the hooks and exit early
1797 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1820 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1798 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1821 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1799 return updateresult(0, 0, 0, 0)
1822 return updateresult(0, 0, 0, 0)
1800
1823
1801 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1824 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1802 [p1],
1825 [p1],
1803 [p2],
1826 [p2],
1804 ): # nonlinear
1827 ): # nonlinear
1805 dirty = wc.dirty(missing=True)
1828 dirty = wc.dirty(missing=True)
1806 if dirty:
1829 if dirty:
1807 # Branching is a bit strange to ensure we do the minimal
1830 # Branching is a bit strange to ensure we do the minimal
1808 # amount of call to obsutil.foreground.
1831 # amount of call to obsutil.foreground.
1809 foreground = obsutil.foreground(repo, [p1.node()])
1832 foreground = obsutil.foreground(repo, [p1.node()])
1810 # note: the <node> variable contains a random identifier
1833 # note: the <node> variable contains a random identifier
1811 if repo[node].node() in foreground:
1834 if repo[node].node() in foreground:
1812 pass # allow updating to successors
1835 pass # allow updating to successors
1813 else:
1836 else:
1814 msg = _(b"uncommitted changes")
1837 msg = _(b"uncommitted changes")
1815 hint = _(b"commit or update --clean to discard changes")
1838 hint = _(b"commit or update --clean to discard changes")
1816 raise error.UpdateAbort(msg, hint=hint)
1839 raise error.UpdateAbort(msg, hint=hint)
1817 else:
1840 else:
1818 # Allow jumping branches if clean and specific rev given
1841 # Allow jumping branches if clean and specific rev given
1819 pass
1842 pass
1820
1843
1821 if overwrite:
1844 if overwrite:
1822 pas = [wc]
1845 pas = [wc]
1823 elif not branchmerge:
1846 elif not branchmerge:
1824 pas = [p1]
1847 pas = [p1]
1825
1848
1826 # deprecated config: merge.followcopies
1849 # deprecated config: merge.followcopies
1827 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1850 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1828 if overwrite:
1851 if overwrite:
1829 followcopies = False
1852 followcopies = False
1830 elif not pas[0]:
1853 elif not pas[0]:
1831 followcopies = False
1854 followcopies = False
1832 if not branchmerge and not wc.dirty(missing=True):
1855 if not branchmerge and not wc.dirty(missing=True):
1833 followcopies = False
1856 followcopies = False
1834
1857
1835 ### calculate phase
1858 ### calculate phase
1836 mresult = calculateupdates(
1859 mresult = calculateupdates(
1837 repo,
1860 repo,
1838 wc,
1861 wc,
1839 p2,
1862 p2,
1840 pas,
1863 pas,
1841 branchmerge,
1864 branchmerge,
1842 force,
1865 force,
1843 mergeancestor,
1866 mergeancestor,
1844 followcopies,
1867 followcopies,
1845 matcher=matcher,
1868 matcher=matcher,
1846 mergeforce=mergeforce,
1869 mergeforce=mergeforce,
1847 )
1870 )
1848
1871
1849 if updatecheck == UPDATECHECK_NO_CONFLICT:
1872 if updatecheck == UPDATECHECK_NO_CONFLICT:
1850 if mresult.hasconflicts():
1873 if mresult.hasconflicts():
1851 msg = _(b"conflicting changes")
1874 msg = _(b"conflicting changes")
1852 hint = _(b"commit or update --clean to discard changes")
1875 hint = _(b"commit or update --clean to discard changes")
1853 raise error.Abort(msg, hint=hint)
1876 raise error.Abort(msg, hint=hint)
1854
1877
1855 # Prompt and create actions. Most of this is in the resolve phase
1878 # Prompt and create actions. Most of this is in the resolve phase
1856 # already, but we can't handle .hgsubstate in filemerge or
1879 # already, but we can't handle .hgsubstate in filemerge or
1857 # subrepoutil.submerge yet so we have to keep prompting for it.
1880 # subrepoutil.submerge yet so we have to keep prompting for it.
1858 if b'.hgsubstate' in mresult.actions:
1881 if b'.hgsubstate' in mresult.actions:
1859 f = b'.hgsubstate'
1882 f = b'.hgsubstate'
1860 m, args, msg = mresult.actions[f]
1883 m, args, msg = mresult.actions[f]
1861 prompts = filemerge.partextras(labels)
1884 prompts = filemerge.partextras(labels)
1862 prompts[b'f'] = f
1885 prompts[b'f'] = f
1863 if m == mergestatemod.ACTION_CHANGED_DELETED:
1886 if m == mergestatemod.ACTION_CHANGED_DELETED:
1864 if repo.ui.promptchoice(
1887 if repo.ui.promptchoice(
1865 _(
1888 _(
1866 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1889 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1867 b"use (c)hanged version or (d)elete?"
1890 b"use (c)hanged version or (d)elete?"
1868 b"$$ &Changed $$ &Delete"
1891 b"$$ &Changed $$ &Delete"
1869 )
1892 )
1870 % prompts,
1893 % prompts,
1871 0,
1894 0,
1872 ):
1895 ):
1873 mresult.actions[f] = (
1896 mresult.addfile(
1874 mergestatemod.ACTION_REMOVE,
1897 f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
1875 None,
1876 b'prompt delete',
1877 )
1898 )
1878 elif f in p1:
1899 elif f in p1:
1879 mresult.actions[f] = (
1900 mresult.addfile(
1901 f,
1880 mergestatemod.ACTION_ADD_MODIFIED,
1902 mergestatemod.ACTION_ADD_MODIFIED,
1881 None,
1903 None,
1882 b'prompt keep',
1904 b'prompt keep',
1883 )
1905 )
1884 else:
1906 else:
1885 mresult.actions[f] = (
1907 mresult.addfile(
1886 mergestatemod.ACTION_ADD,
1908 f, mergestatemod.ACTION_ADD, None, b'prompt keep',
1887 None,
1888 b'prompt keep',
1889 )
1909 )
1890 elif m == mergestatemod.ACTION_DELETED_CHANGED:
1910 elif m == mergestatemod.ACTION_DELETED_CHANGED:
1891 f1, f2, fa, move, anc = args
1911 f1, f2, fa, move, anc = args
1892 flags = p2[f2].flags()
1912 flags = p2[f2].flags()
1893 if (
1913 if (
1894 repo.ui.promptchoice(
1914 repo.ui.promptchoice(
1895 _(
1915 _(
1896 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
1916 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
1897 b"use (c)hanged version or leave (d)eleted?"
1917 b"use (c)hanged version or leave (d)eleted?"
1898 b"$$ &Changed $$ &Deleted"
1918 b"$$ &Changed $$ &Deleted"
1899 )
1919 )
1900 % prompts,
1920 % prompts,
1901 0,
1921 0,
1902 )
1922 )
1903 == 0
1923 == 0
1904 ):
1924 ):
1905 mresult.actions[f] = (
1925 mresult.addfile(
1926 f,
1906 mergestatemod.ACTION_GET,
1927 mergestatemod.ACTION_GET,
1907 (flags, False),
1928 (flags, False),
1908 b'prompt recreating',
1929 b'prompt recreating',
1909 )
1930 )
1910 else:
1931 else:
1911 del mresult.actions[f]
1932 del mresult.actions[f]
1912
1933
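The hunk above is the core of this change: instead of assigning tuples directly into ``mresult.actions[f]``, callers now go through the new ``mergeresult.addfile()`` API, passing the filename together with the action, its data, and a message. A minimal sketch of the call pattern, using a hypothetical stripped-down stand-in for ``mergeresult`` (the real class does considerably more bookkeeping; the constant value below is a placeholder)::

    # Hypothetical stand-in, for illustration only -- not the real mergeresult.
    class mergeresult(object):
        def __init__(self):
            # filename -> (action, data, message)
            self.actions = {}

        def addfile(self, filename, action, data, message):
            """record that `action` should be applied to `filename`"""
            self.actions[filename] = (action, data, message)

    ACTION_REMOVE = b'r'  # placeholder for mergestatemod.ACTION_REMOVE

    mresult = mergeresult()
    # old style (before this patch):
    #   mresult.actions[f] = (ACTION_REMOVE, None, b'prompt delete')
    # new style (this patch):
    mresult.addfile(b'.hgsubstate', ACTION_REMOVE, None, b'prompt delete')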
1913 # Convert to dictionary-of-lists format
1934 # Convert to dictionary-of-lists format
1914 actions = mresult.actionsdict
1935 actions = mresult.actionsdict
1915
1936
1916 if not util.fscasesensitive(repo.path):
1937 if not util.fscasesensitive(repo.path):
1917 # check collision between files only in p2 for clean update
1938 # check collision between files only in p2 for clean update
1918 if not branchmerge and (
1939 if not branchmerge and (
1919 force or not wc.dirty(missing=True, branch=False)
1940 force or not wc.dirty(missing=True, branch=False)
1920 ):
1941 ):
1921 _checkcollision(repo, p2.manifest(), None)
1942 _checkcollision(repo, p2.manifest(), None)
1922 else:
1943 else:
1923 _checkcollision(repo, wc.manifest(), actions)
1944 _checkcollision(repo, wc.manifest(), actions)
1924
1945
1925 # divergent renames
1946 # divergent renames
1926 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
1947 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
1927 repo.ui.warn(
1948 repo.ui.warn(
1928 _(
1949 _(
1929 b"note: possible conflict - %s was renamed "
1950 b"note: possible conflict - %s was renamed "
1930 b"multiple times to:\n"
1951 b"multiple times to:\n"
1931 )
1952 )
1932 % f
1953 % f
1933 )
1954 )
1934 for nf in sorted(fl):
1955 for nf in sorted(fl):
1935 repo.ui.warn(b" %s\n" % nf)
1956 repo.ui.warn(b" %s\n" % nf)
1936
1957
1937 # rename and delete
1958 # rename and delete
1938 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
1959 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
1939 repo.ui.warn(
1960 repo.ui.warn(
1940 _(
1961 _(
1941 b"note: possible conflict - %s was deleted "
1962 b"note: possible conflict - %s was deleted "
1942 b"and renamed to:\n"
1963 b"and renamed to:\n"
1943 )
1964 )
1944 % f
1965 % f
1945 )
1966 )
1946 for nf in sorted(fl):
1967 for nf in sorted(fl):
1947 repo.ui.warn(b" %s\n" % nf)
1968 repo.ui.warn(b" %s\n" % nf)
1948
1969
1949 ### apply phase
1970 ### apply phase
1950 if not branchmerge: # just jump to the new rev
1971 if not branchmerge: # just jump to the new rev
1951 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
1972 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
1952 # If we're doing a partial update, we need to skip updating
1973 # If we're doing a partial update, we need to skip updating
1953 # the dirstate.
1974 # the dirstate.
1954 always = matcher is None or matcher.always()
1975 always = matcher is None or matcher.always()
1955 updatedirstate = updatedirstate and always and not wc.isinmemory()
1976 updatedirstate = updatedirstate and always and not wc.isinmemory()
1956 if updatedirstate:
1977 if updatedirstate:
1957 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
1978 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
1958 # note that we're in the middle of an update
1979 # note that we're in the middle of an update
1959 repo.vfs.write(b'updatestate', p2.hex())
1980 repo.vfs.write(b'updatestate', p2.hex())
1960
1981
1961 _advertisefsmonitor(
1982 _advertisefsmonitor(
1962 repo, len(actions[mergestatemod.ACTION_GET]), p1.node()
1983 repo, len(actions[mergestatemod.ACTION_GET]), p1.node()
1963 )
1984 )
1964
1985
1965 wantfiledata = updatedirstate and not branchmerge
1986 wantfiledata = updatedirstate and not branchmerge
1966 stats, getfiledata = applyupdates(
1987 stats, getfiledata = applyupdates(
1967 repo,
1988 repo,
1968 actions,
1989 actions,
1969 wc,
1990 wc,
1970 p2,
1991 p2,
1971 overwrite,
1992 overwrite,
1972 wantfiledata,
1993 wantfiledata,
1973 labels=labels,
1994 labels=labels,
1974 commitinfo=mresult.commitinfo,
1995 commitinfo=mresult.commitinfo,
1975 )
1996 )
1976
1997
1977 if updatedirstate:
1998 if updatedirstate:
1978 with repo.dirstate.parentchange():
1999 with repo.dirstate.parentchange():
1979 repo.setparents(fp1, fp2)
2000 repo.setparents(fp1, fp2)
1980 mergestatemod.recordupdates(
2001 mergestatemod.recordupdates(
1981 repo, actions, branchmerge, getfiledata
2002 repo, actions, branchmerge, getfiledata
1982 )
2003 )
1983 # update completed, clear state
2004 # update completed, clear state
1984 util.unlink(repo.vfs.join(b'updatestate'))
2005 util.unlink(repo.vfs.join(b'updatestate'))
1985
2006
1986 if not branchmerge:
2007 if not branchmerge:
1987 repo.dirstate.setbranch(p2.branch())
2008 repo.dirstate.setbranch(p2.branch())
1988
2009
1989 # If we're updating to a location, clean up any stale temporary includes
2010 # If we're updating to a location, clean up any stale temporary includes
1990 # (ex: this happens during hg rebase --abort).
2011 # (ex: this happens during hg rebase --abort).
1991 if not branchmerge:
2012 if not branchmerge:
1992 sparse.prunetemporaryincludes(repo)
2013 sparse.prunetemporaryincludes(repo)
1993
2014
1994 if updatedirstate:
2015 if updatedirstate:
1995 repo.hook(
2016 repo.hook(
1996 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2017 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
1997 )
2018 )
1998 return stats
2019 return stats
1999
2020
2000
2021
2001 def merge(ctx, labels=None, force=False, wc=None):
2022 def merge(ctx, labels=None, force=False, wc=None):
2002 """Merge another topological branch into the working copy.
2023 """Merge another topological branch into the working copy.
2003
2024
2004 force = whether the merge was run with 'merge --force' (deprecated)
2025 force = whether the merge was run with 'merge --force' (deprecated)
2005 """
2026 """
2006
2027
2007 return update(
2028 return update(
2008 ctx.repo(),
2029 ctx.repo(),
2009 ctx.rev(),
2030 ctx.rev(),
2010 labels=labels,
2031 labels=labels,
2011 branchmerge=True,
2032 branchmerge=True,
2012 force=force,
2033 force=force,
2013 mergeforce=force,
2034 mergeforce=force,
2014 wc=wc,
2035 wc=wc,
2015 )
2036 )
2016
2037
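``merge()`` is the thin public wrapper that drives ``update()`` with ``branchmerge=True``. A minimal usage sketch, assuming an existing ``repo`` object and that ``b'feature'`` names the head to merge in (both are placeholders, not part of this patch)::

    from mercurial import merge as mergemod

    ctx = repo[b'feature']            # changeset context of the other head
    stats = mergemod.merge(ctx, labels=[b'working copy', b'merge rev'])
    if stats.unresolvedcount:
        repo.ui.warn(b'unresolved conflicts; use hg resolve to retry\n')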
2017
2038
2018 def clean_update(ctx, wc=None):
2039 def clean_update(ctx, wc=None):
2019 """Do a clean update to the given commit.
2040 """Do a clean update to the given commit.
2020
2041
2021 This involves updating to the commit and discarding any changes in the
2042 This involves updating to the commit and discarding any changes in the
2022 working copy.
2043 working copy.
2023 """
2044 """
2024 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2045 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2025
2046
2026
2047
2027 def revert_to(ctx, matcher=None, wc=None):
2048 def revert_to(ctx, matcher=None, wc=None):
2028 """Revert the working copy to the given commit.
2049 """Revert the working copy to the given commit.
2029
2050
2030 The working copy will keep its current parent(s) but its content will
2051 The working copy will keep its current parent(s) but its content will
2031 be the same as in the given commit.
2052 be the same as in the given commit.
2032 """
2053 """
2033
2054
2034 return update(
2055 return update(
2035 ctx.repo(),
2056 ctx.repo(),
2036 ctx.rev(),
2057 ctx.rev(),
2037 branchmerge=False,
2058 branchmerge=False,
2038 force=True,
2059 force=True,
2039 updatedirstate=False,
2060 updatedirstate=False,
2040 matcher=matcher,
2061 matcher=matcher,
2041 wc=wc,
2062 wc=wc,
2042 )
2063 )
2043
2064
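``clean_update()`` and ``revert_to()`` are two more convenience wrappers around ``update()``: the former moves the working copy to another commit and discards local changes, while the latter keeps the current parents and only rewrites the working copy contents. A hedged sketch of both (``repo`` and the revision identifiers are assumptions)::

    from mercurial import merge as mergemod

    # discard local changes and check out the repository tip
    mergemod.clean_update(repo[b'tip'])

    # make the working copy contents match revision 1.0 without
    # changing the working copy parent (similar to `hg revert --all -r 1.0`)
    mergemod.revert_to(repo[b'1.0'])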
2044
2065
2045 def graft(
2066 def graft(
2046 repo,
2067 repo,
2047 ctx,
2068 ctx,
2048 base=None,
2069 base=None,
2049 labels=None,
2070 labels=None,
2050 keepparent=False,
2071 keepparent=False,
2051 keepconflictparent=False,
2072 keepconflictparent=False,
2052 wctx=None,
2073 wctx=None,
2053 ):
2074 ):
2054 """Do a graft-like merge.
2075 """Do a graft-like merge.
2055
2076
2056 This is a merge where the merge ancestor is chosen such that one
2077 This is a merge where the merge ancestor is chosen such that one
2057 or more changesets are grafted onto the current changeset. In
2078 or more changesets are grafted onto the current changeset. In
2058 addition to the merge, this fixes up the dirstate to include only
2079 addition to the merge, this fixes up the dirstate to include only
2059 a single parent (if keepparent is False) and tries to duplicate any
2080 a single parent (if keepparent is False) and tries to duplicate any
2060 renames/copies appropriately.
2081 renames/copies appropriately.
2061
2082
2062 ctx - changeset to rebase
2083 ctx - changeset to rebase
2063 base - merge base, or ctx.p1() if not specified
2084 base - merge base, or ctx.p1() if not specified
2064 labels - merge labels eg ['local', 'graft']
2085 labels - merge labels eg ['local', 'graft']
2065 keepparent - keep second parent if any
2086 keepparent - keep second parent if any
2066 keepconflictparent - if unresolved, keep parent used for the merge
2087 keepconflictparent - if unresolved, keep parent used for the merge
2067
2088
2068 """
2089 """
2069 # If we're grafting a descendant onto an ancestor, be sure to pass
2090 # If we're grafting a descendant onto an ancestor, be sure to pass
2070 # mergeancestor=True to update. This does two things: 1) allows the merge if
2091 # mergeancestor=True to update. This does two things: 1) allows the merge if
2071 # the destination is the same as the parent of the ctx (so we can use graft
2092 # the destination is the same as the parent of the ctx (so we can use graft
2072 # to copy commits), and 2) informs update that the incoming changes are
2093 # to copy commits), and 2) informs update that the incoming changes are
2073 # newer than the destination so it doesn't prompt about "remote changed foo
2094 # newer than the destination so it doesn't prompt about "remote changed foo
2074 # which local deleted".
2095 # which local deleted".
2075 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2096 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2076 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2097 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2077 wctx = wctx or repo[None]
2098 wctx = wctx or repo[None]
2078 pctx = wctx.p1()
2099 pctx = wctx.p1()
2079 base = base or ctx.p1()
2100 base = base or ctx.p1()
2080 mergeancestor = (
2101 mergeancestor = (
2081 repo.changelog.isancestor(pctx.node(), ctx.node())
2102 repo.changelog.isancestor(pctx.node(), ctx.node())
2082 or pctx.rev() == base.rev()
2103 or pctx.rev() == base.rev()
2083 )
2104 )
2084
2105
2085 stats = update(
2106 stats = update(
2086 repo,
2107 repo,
2087 ctx.node(),
2108 ctx.node(),
2088 True,
2109 True,
2089 True,
2110 True,
2090 base.node(),
2111 base.node(),
2091 mergeancestor=mergeancestor,
2112 mergeancestor=mergeancestor,
2092 labels=labels,
2113 labels=labels,
2093 wc=wctx,
2114 wc=wctx,
2094 )
2115 )
2095
2116
2096 if keepconflictparent and stats.unresolvedcount:
2117 if keepconflictparent and stats.unresolvedcount:
2097 pother = ctx.node()
2118 pother = ctx.node()
2098 else:
2119 else:
2099 pother = nullid
2120 pother = nullid
2100 parents = ctx.parents()
2121 parents = ctx.parents()
2101 if keepparent and len(parents) == 2 and base in parents:
2122 if keepparent and len(parents) == 2 and base in parents:
2102 parents.remove(base)
2123 parents.remove(base)
2103 pother = parents[0].node()
2124 pother = parents[0].node()
2104 # Never set both parents equal to each other
2125 # Never set both parents equal to each other
2105 if pother == pctx.node():
2126 if pother == pctx.node():
2106 pother = nullid
2127 pother = nullid
2107
2128
2108 if wctx.isinmemory():
2129 if wctx.isinmemory():
2109 wctx.setparents(pctx.node(), pother)
2130 wctx.setparents(pctx.node(), pother)
2110 # fix up dirstate for copies and renames
2131 # fix up dirstate for copies and renames
2111 copies.graftcopies(wctx, ctx, base)
2132 copies.graftcopies(wctx, ctx, base)
2112 else:
2133 else:
2113 with repo.dirstate.parentchange():
2134 with repo.dirstate.parentchange():
2114 repo.setparents(pctx.node(), pother)
2135 repo.setparents(pctx.node(), pother)
2115 repo.dirstate.write(repo.currenttransaction())
2136 repo.dirstate.write(repo.currenttransaction())
2116 # fix up dirstate for copies and renames
2137 # fix up dirstate for copies and renames
2117 copies.graftcopies(wctx, ctx, base)
2138 copies.graftcopies(wctx, ctx, base)
2118 return stats
2139 return stats
2119
2140
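Per its docstring, ``graft()`` replays ``ctx`` onto the current working copy parent by choosing the merge ancestor accordingly, then collapses the result back to a single parent. A minimal sketch of a graft-style call (the revision number and the ``repo`` handle are assumptions)::

    from mercurial import merge as mergemod

    ctx = repo[42]                     # changeset to graft onto the current parent
    stats = mergemod.graft(
        repo,
        ctx,
        base=ctx.p1(),                 # the default the function would pick anyway
        labels=[b'local', b'graft'],
    )
    if stats.unresolvedcount:
        repo.ui.status(b'conflicts during graft; resolve before committing\n')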
2120
2141
2121 def purge(
2142 def purge(
2122 repo,
2143 repo,
2123 matcher,
2144 matcher,
2124 unknown=True,
2145 unknown=True,
2125 ignored=False,
2146 ignored=False,
2126 removeemptydirs=True,
2147 removeemptydirs=True,
2127 removefiles=True,
2148 removefiles=True,
2128 abortonerror=False,
2149 abortonerror=False,
2129 noop=False,
2150 noop=False,
2130 ):
2151 ):
2131 """Purge the working directory of untracked files.
2152 """Purge the working directory of untracked files.
2132
2153
2133 ``matcher`` is a matcher configured to scan the working directory -
2154 ``matcher`` is a matcher configured to scan the working directory -
2134 potentially a subset.
2155 potentially a subset.
2135
2156
2136 ``unknown`` controls whether unknown files should be purged.
2157 ``unknown`` controls whether unknown files should be purged.
2137
2158
2138 ``ignored`` controls whether ignored files should be purged.
2159 ``ignored`` controls whether ignored files should be purged.
2139
2160
2140 ``removeemptydirs`` controls whether empty directories should be removed.
2161 ``removeemptydirs`` controls whether empty directories should be removed.
2141
2162
2142 ``removefiles`` controls whether files are removed.
2163 ``removefiles`` controls whether files are removed.
2143
2164
2144 ``abortonerror`` causes an exception to be raised if an error occurs
2165 ``abortonerror`` causes an exception to be raised if an error occurs
2145 deleting a file or directory.
2166 deleting a file or directory.
2146
2167
2147 ``noop`` controls whether to actually remove files. If not defined, actions
2168 ``noop`` controls whether to actually remove files. If not defined, actions
2148 will be taken.
2169 will be taken.
2149
2170
2150 Returns an iterable of relative paths in the working directory that were
2171 Returns an iterable of relative paths in the working directory that were
2151 or would be removed.
2172 or would be removed.
2152 """
2173 """
2153
2174
2154 def remove(removefn, path):
2175 def remove(removefn, path):
2155 try:
2176 try:
2156 removefn(path)
2177 removefn(path)
2157 except OSError:
2178 except OSError:
2158 m = _(b'%s cannot be removed') % path
2179 m = _(b'%s cannot be removed') % path
2159 if abortonerror:
2180 if abortonerror:
2160 raise error.Abort(m)
2181 raise error.Abort(m)
2161 else:
2182 else:
2162 repo.ui.warn(_(b'warning: %s\n') % m)
2183 repo.ui.warn(_(b'warning: %s\n') % m)
2163
2184
2164 # There's no API to copy a matcher. So mutate the passed matcher and
2185 # There's no API to copy a matcher. So mutate the passed matcher and
2165 # restore it when we're done.
2186 # restore it when we're done.
2166 oldtraversedir = matcher.traversedir
2187 oldtraversedir = matcher.traversedir
2167
2188
2168 res = []
2189 res = []
2169
2190
2170 try:
2191 try:
2171 if removeemptydirs:
2192 if removeemptydirs:
2172 directories = []
2193 directories = []
2173 matcher.traversedir = directories.append
2194 matcher.traversedir = directories.append
2174
2195
2175 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2196 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2176
2197
2177 if removefiles:
2198 if removefiles:
2178 for f in sorted(status.unknown + status.ignored):
2199 for f in sorted(status.unknown + status.ignored):
2179 if not noop:
2200 if not noop:
2180 repo.ui.note(_(b'removing file %s\n') % f)
2201 repo.ui.note(_(b'removing file %s\n') % f)
2181 remove(repo.wvfs.unlink, f)
2202 remove(repo.wvfs.unlink, f)
2182 res.append(f)
2203 res.append(f)
2183
2204
2184 if removeemptydirs:
2205 if removeemptydirs:
2185 for f in sorted(directories, reverse=True):
2206 for f in sorted(directories, reverse=True):
2186 if matcher(f) and not repo.wvfs.listdir(f):
2207 if matcher(f) and not repo.wvfs.listdir(f):
2187 if not noop:
2208 if not noop:
2188 repo.ui.note(_(b'removing directory %s\n') % f)
2209 repo.ui.note(_(b'removing directory %s\n') % f)
2189 remove(repo.wvfs.rmdir, f)
2210 remove(repo.wvfs.rmdir, f)
2190 res.append(f)
2211 res.append(f)
2191
2212
2192 return res
2213 return res
2193
2214
2194 finally:
2215 finally:
2195 matcher.traversedir = oldtraversedir
2216 matcher.traversedir = oldtraversedir
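``purge()`` scans the working directory with the supplied matcher and removes (or, with ``noop``, merely reports) untracked and optionally ignored files and empty directories. A rough usage sketch, assuming a ``repo`` handle and an all-matching matcher::

    from mercurial import match as matchmod
    from mercurial import merge as mergemod

    m = matchmod.always()
    # dry run: list what would be removed without deleting anything
    would_remove = mergemod.purge(repo, m, unknown=True, ignored=False, noop=True)
    for path in would_remove:
        repo.ui.write(b'would remove %s\n' % path)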