##// END OF EJS Templates
merge: remove emptyactions() and use collections.defaultdict(list) instead...
Pulkit Goyal -
r45908:490607ef default
parent child Browse files
Show More
@@ -1,1836 +1,1828 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18
18
19 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
20
20
21 from mercurial import (
21 from mercurial import (
22 archival,
22 archival,
23 cmdutil,
23 cmdutil,
24 copies as copiesmod,
24 copies as copiesmod,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 exthelper,
28 exthelper,
29 filemerge,
29 filemerge,
30 hg,
30 hg,
31 logcmdutil,
31 logcmdutil,
32 match as matchmod,
32 match as matchmod,
33 merge,
33 merge,
34 mergestate as mergestatemod,
34 mergestate as mergestatemod,
35 pathutil,
35 pathutil,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 smartset,
38 smartset,
39 subrepo,
39 subrepo,
40 upgrade,
40 upgrade,
41 url as urlmod,
41 url as urlmod,
42 util,
42 util,
43 )
43 )
44
44
45 from . import (
45 from . import (
46 lfcommands,
46 lfcommands,
47 lfutil,
47 lfutil,
48 storefactory,
48 storefactory,
49 )
49 )
50
50
51 eh = exthelper.exthelper()
51 eh = exthelper.exthelper()
52
52
53 lfstatus = lfutil.lfstatus
53 lfstatus = lfutil.lfstatus
54
54
55 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
55 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
56
56
57 # -- Utility functions: commonly/repeatedly needed functionality ---------------
57 # -- Utility functions: commonly/repeatedly needed functionality ---------------
58
58
59
59
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    matcher = copy.copy(match)

    def islfile(f):
        # A file is a largefile iff its standin is tracked in the manifest.
        return lfutil.standin(f) in manifest

    matcher._files = [f for f in matcher._files if islfile(f)]
    matcher._fileset = set(matcher._files)
    matcher.always = lambda: False
    basematchfn = matcher.matchfn
    matcher.matchfn = lambda f: islfile(f) and basematchfn(f)
    return matcher
71
71
72
72
def composenormalfilematcher(match, manifest, exclude=None):
    """Create a matcher matching only the non-largefiles of the original
    matcher, optionally also excluding the names in ``exclude``."""
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    matcher = copy.copy(match)

    def isnormal(f):
        # Normal means: not a standin, no standin tracked, not excluded.
        if lfutil.isstandin(f):
            return False
        if lfutil.standin(f) in manifest:
            return False
        return f not in excluded

    matcher._files = [f for f in matcher._files if isnormal(f)]
    matcher._fileset = set(matcher._files)
    matcher.always = lambda: False
    basematchfn = matcher.matchfn
    matcher.matchfn = lambda f: isnormal(f) and basematchfn(f)
    return matcher
88
88
89
89
def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    """Schedule matched working-directory files for add as largefiles.

    A file becomes a largefile when --large is given, when it exceeds the
    configured --lfsize threshold, or when it matches the configured
    largefiles patterns. Returns ``(added, bad)``: the names successfully
    scheduled and those whose standins could not be added.
    """
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    m = matcher
    wctx = repo[None]
    lfnames = []
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        hasstandin = lfutil.standin(f) in wctx
        tracked = hasstandin or f in wctx

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and tracked:
            if hasstandin:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        # Only consider exact or untracked names, and never standins.
        if (not exact and tracked) or lfutil.isstandin(f):
            continue

        # In case the file was removed previously, but not committed
        # (issue3507)
        if not repo.wvfs.exists(f):
            continue

        abovemin = (
            lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
        )
        if large or abovemin or (lfmatcher and lfmatcher(f)):
            lfnames.append(f)
            if ui.verbose or not exact:
                ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                # A file previously marked removed is resurrected instead
                # of re-added.
                if lfdirstate[f] == b'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

    added = [f for f in lfnames if f not in bad]
    return added, bad
163
163
164
164
def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    """Remove the largefiles matched by ``matcher`` from the working copy.

    With --after, only already-deleted files are dropped; otherwise clean
    files are unlinked as well. Modified (and, without --after, added)
    files are refused with a warning. Returns 1 if any file was refused,
    0 otherwise.
    """
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    # Keep only names that really are largefiles (have a tracked standin).
    # NOTE: the loop variable was renamed from `list`, which shadowed the
    # builtin of the same name.
    modified, added, deleted, clean = [
        [f for f in files if lfutil.standin(f) in manifest]
        for files in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        # Warn per file; return 1 if anything was warned about, else 0.
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(
                repo, lfdirstate, lfutil.splitstandin(f), False
            )

        lfdirstate.write()

    return result
237
237
238
238
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
@eh.wrapfunction(webcommands, b'decodepath')
def decodepath(orig, path):
    # Present a standin under its largefile name; pass other paths through.
    stripped = lfutil.splitstandin(path)
    return stripped or path
244
244
245
245
246 # -- Wrappers: modify existing commands --------------------------------
246 # -- Wrappers: modify existing commands --------------------------------
247
247
248
248
@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    # --normal and --large contradict each other; refuse before delegating.
    wantnormal = opts.get('normal')
    wantlarge = opts.get('large')
    if wantnormal and wantlarge:
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
269
269
270
270
@eh.wrapfunction(cmdutil, b'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    """Wrap cmdutil.add: add largefiles first, then let the original add
    handle the remaining normal files. Returns the combined list of names
    that could not be added."""
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    # Exclude the just-added largefiles from the normal-file add.
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)

    # (was: bad.extend(f for f in lbad) — the generator added nothing)
    bad.extend(lbad)
    return bad
285
285
286
286
@eh.wrapfunction(cmdutil, b'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    # Let the stock remove handle the normal files first...
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    normalresult = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    # ...then remove the largefiles ourselves; report failure from either.
    lfresult = removelargefiles(
        ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
    )
    return lfresult or normalresult
309
309
310
310
@eh.wrapfunction(subrepo.hgsubrepo, b'status')
def overridestatusfn(orig, repo, rev2, **opts):
    # Run the subrepo status with largefiles-aware reporting enabled.
    with lfstatus(repo._repo):
        result = orig(repo, rev2, **opts)
    return result
315
315
316
316
@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    # Run 'hg status' with largefiles-aware reporting enabled.
    with lfstatus(repo):
        result = orig(ui, repo, *pats, **opts)
    return result
321
321
322
322
@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    # Evaluate subrepo dirtiness with largefiles-aware status enabled.
    with lfstatus(repo._repo):
        result = orig(repo, ignoreupdate=ignoreupdate, missing=missing)
    return result
327
327
328
328
@eh.wrapcommand(b'log')
def overridelog(orig, ui, repo, *pats, **opts):
    def overridematchandpats(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # filesets are left untouched; for kind:pat patterns only the
            # pattern part is mapped to its standin.
            if pat.startswith(b'set:'):
                return pat

            kind, rest = matchmod._patsplit(pat, None)
            if kind is not None:
                return kind + b':' + tostandin(rest)
            return tostandin(rest)

        cwd = repo.getcwd()
        if cwd:
            hglf = lfutil.shortname
            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(cwd):
                    f = f[len(back) :]
                else:
                    f = cwd + b'/' + f
                return back + lfutil.standin(f)

        else:

            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)

        pats.update(fixpats(f, tostandin) for f in p)

        # NB: the loop may append to m._files, so iterate over the original
        # length only (range(len(...)) is evaluated once, up front).
        for i in range(len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == b'.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        basematchfn = m.matchfn

        def lfmatchfn(f):
            # Match either the name itself or its largefile counterpart.
            lf = lfutil.splitstandin(f)
            if lf is not None and basematchfn(lf):
                return True
            return basematchfn(f)

        m.matchfn = lfmatchfn

        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    oldmatchandpats = scmutil.matchandpats

    def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    wrappedmatchandpats = extensions.wrappedfunction(
        scmutil, b'matchandpats', overridematchandpats
    )
    wrappedmakefilematcher = extensions.wrappedfunction(
        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
    )
    with wrappedmatchandpats, wrappedmakefilematcher:
        return orig(ui, repo, *pats, **opts)
448
448
449
449
@eh.wrapcommand(
    b'verify',
    opts=[
        (
            b'',
            b'large',
            None,
            _(b'verify that all largefiles in current revision exists'),
        ),
        (
            b'',
            b'lfa',
            None,
            _(b'verify largefiles in all revisions, not just current'),
        ),
        (
            b'',
            b'lfc',
            None,
            _(b'verify local largefile contents, not just existence'),
        ),
    ],
)
def overrideverify(orig, ui, repo, *pats, **opts):
    """Wrap 'hg verify' to optionally verify largefiles as well.

    The largefiles-specific flags are popped before delegating so the
    wrapped command never sees them.
    """
    large = opts.pop('large', False)
    # Renamed from `all`, which shadowed the builtin.
    allrevs = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or allrevs or contents:
        result = result or lfcommands.verifylfiles(ui, repo, allrevs, contents)
    return result
482
482
483
483
@eh.wrapcommand(
    b'debugstate',
    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
)
def overridedebugstate(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    if not large:
        orig(ui, repo, *pats, **opts)
        return

    # Present the largefiles dirstate through a minimal repo lookalike; the
    # lfdirstate is only opened when --large was actually requested.
    class fakerepo(object):
        dirstate = lfutil.openlfdirstate(ui, repo)

    orig(ui, fakerepo, *pats, **opts)
498
498
499
499
# Register the MERGE_ACTION_LARGEFILE_MARK_REMOVED in emptyactions() return type
@eh.wrapfunction(merge, b'emptyactions')
def overrideemptyactions(origfn):
    actions = origfn()
    actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED] = []
    return actions
506
507
508 # Before starting the manifest merge, merge.updates will call
500 # Before starting the manifest merge, merge.updates will call
509 # _checkunknownfile to check if there are any files in the merged-in
501 # _checkunknownfile to check if there are any files in the merged-in
510 # changeset that collide with unknown files in the working copy.
502 # changeset that collide with unknown files in the working copy.
511 #
503 #
512 # The largefiles are seen as unknown, so this prevents us from merging
504 # The largefiles are seen as unknown, so this prevents us from merging
513 # in a file 'foo' if we already have a largefile with the same name.
505 # in a file 'foo' if we already have a largefile with the same name.
514 #
506 #
515 # The overridden function filters the unknown files by removing any
507 # The overridden function filters the unknown files by removing any
516 # largefiles. This makes the merge proceed and we can then handle this
508 # largefiles. This makes the merge proceed and we can then handle this
517 # case further in the overridden calculateupdates function below.
509 # case further in the overridden calculateupdates function below.
@eh.wrapfunction(merge, b'_checkunknownfile')
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    # A working-copy file whose standin is tracked is a largefile, not an
    # unknown file, so it must never be flagged as a merge collision.
    normalized = repo.dirstate.normalize(f)
    if lfutil.standin(normalized) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
523
515
524
516
525 # The manifest merge handles conflicts on the manifest level. We want
517 # The manifest merge handles conflicts on the manifest level. We want
526 # to handle changes in largefile-ness of files at this level too.
518 # to handle changes in largefile-ness of files at this level too.
527 #
519 #
528 # The strategy is to run the original calculateupdates and then process
520 # The strategy is to run the original calculateupdates and then process
529 # the action list it outputs. There are two cases we need to deal with:
521 # the action list it outputs. There are two cases we need to deal with:
530 #
522 #
531 # 1. Normal file in p1, largefile in p2. Here the largefile is
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
@eh.wrapfunction(merge, b'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    """Wrap merge.calculateupdates() to reconcile largefile/normal conflicts.

    Runs the original calculation, then post-processes the merge result so
    that a path which is a largefile on one side and a normal file on the
    other is resolved by prompting the user (the largefile is the default
    answer; see the module comment above for the two cases handled).

    Returns the (possibly modified) merge result object.
    """
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        # A plain overwrite (forced, non-merge update) cannot produce
        # largefile/normal conflicts, so nothing to reconcile.
        return mresult

    # Collect every largefile name the merge touches, whether it shows up
    # in the result as the largefile itself or only as its standin.
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        # Actions recorded for the largefile and for its standin; either
        # may be absent, in which case the action fields are None.
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
        if sm in (b'g', b'dc') and lm != b'r':
            if sm == b'dc':
                # Rewrite the delayed-conflict args into plain 'get' args
                # so the standin can simply be fetched if chosen below.
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(lfile, b'r', None, b'replaced by standin')
                mresult.addfile(standin, b'g', sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, b'k', None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin, b'k', None, b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin, b'r', None, b'replaced by non-standin',
                    )
        elif lm in (b'g', b'dc') and sm != b'r':
            if lm == b'dc':
                # Same 'dc' -> 'g' argument rewrite as above, for the
                # largefile side this time.
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile, b'k', None, b'replaced by standin',
                    )
                    mresult.addfile(standin, b'k', None, b'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, b'a', None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, b'g', largs, b'replaces standin')
                mresult.addfile(
                    standin, b'r', None, b'replaced by non-standin',
                )

    return mresult
643
635
644
636
@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    """Wrap mergestate.recordupdates() to handle the largefiles-only
    MERGE_ACTION_LARGEFILE_MARK_REMOVED ('lfmr') action.

    For each file with that action, mark it removed in the main dirstate
    and added in the largefiles dirstate *before* delegating to the
    original function, then return whatever the original returns.
    """
    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            repo.dirstate.remove(lfile)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge, getfiledata)
658
650
659
651
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, b'_filemerge')
def overridefilemerge(
    origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    """Wrap filemerge._filemerge() to resolve standin (largefile) merges.

    Non-standin files, and merges where either side is absent, are passed
    straight through to the original function.  For standins, the merge is
    resolved on the largefile hashes: identical or trivially-resolvable
    edits are taken silently, otherwise the user is prompted to keep the
    local or take the other largefile.

    Returns a tuple in the same shape as filemerge._filemerge()'s result
    (here always ``(True, 0, False)`` for the standin path).
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(
            premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
        )

    # Hashes of the largefile contents recorded in the ancestor, local
    # (destination) and other standins.
    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
    # Take the other side when: the other side actually changed
    # (ohash != ahash) AND differs from local (ohash != dhash) AND either
    # local is unchanged from the ancestor (dhash == ahash, so no real
    # conflict) or the user explicitly picks "other" at the prompt.
    if (
        ohash != ahash
        and ohash != dhash
        and (
            dhash == ahash
            or repo.ui.promptchoice(
                _(
                    b'largefile %s has a merge conflict\nancestor was %s\n'
                    b'you can keep (l)ocal %s or take (o)ther %s.\n'
                    b'what do you want to do?'
                    b'$$ &Local $$ &Other'
                )
                % (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0,
            )
            == 1
        )
    ):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False
694
686
695
687
@eh.wrapfunction(copiesmod, b'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Wrap copies.pathcopies() to map standin paths back to largefiles.

    Every key and value in the copy mapping that is a standin path is
    replaced by the corresponding largefile name; other entries pass
    through unchanged.
    """
    return {
        lfutil.splitstandin(dst) or dst: lfutil.splitstandin(src) or src
        for dst, src in pycompat.iteritems(orig(ctx1, ctx2, match=match))
    }
705
697
706
698
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
@eh.wrapfunction(cmdutil, b'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Wrap cmdutil.copy() to copy/rename largefiles via their standins.

    Runs the original copy twice: once restricted to normal files, then
    once with matchers rewritten to hit standins, finally copying/renaming
    the actual largefiles and updating the largefiles dirstate.  Raises
    error.Abort(b'no files to copy') only if *both* passes found nothing.
    Returns the combined result code of the two passes.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        # Matcher wrapper for pass 1: exclude all largefiles.
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            # Swallow only the "no files to copy" abort; it merely means
            # pass 1 had nothing to do.  Anything else propagates.
            if pycompat.bytestr(e) != _(b'no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # Absolute path of the standin for a working-dir-relative path.
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()

        def overridematch(
            orig,
            ctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Matcher wrapper for pass 2: match only standins, presented
            # to callers under their standin names.
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, b''))
                else:
                    newpats.append(pat)
            match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                return (
                    lfile is not None
                    and (f in manifest)
                    and origmatchfn(lfile)
                    or None
                )

            m.matchfn = matchfn
            return m

        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        # (src, dest) pairs recorded by overridecopyfile so the real
        # largefiles can be copied/renamed afterwards.
        copiedfiles = []

        def overridecopyfile(orig, src, dest, *args, **kwargs):
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                destlfile = dest.replace(lfutil.shortname, b'')
                if not opts[b'force'] and os.path.exists(destlfile):
                    raise IOError(
                        b'', _(b'destination largefile already exists')
                    )
            copiedfiles.append((src, dest))
            orig(src, dest, *args, **kwargs)

        with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
            with extensions.wrappedfunction(scmutil, b'match', overridematch):
                result += orig(ui, repo, listpats, opts, rename)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                # Translate standin paths back to the largefile paths.
                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                lfdirstate.add(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        # Same "no files to copy" tolerance as pass 1, for the
        # largefiles-only pass.
        if pycompat.bytestr(e) != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_(b'no files to copy'))

    return result
873
865
874
866
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    """Wrap cmdutil.revert() to revert largefiles via their standins.

    Synchronizes standins with the current largefile contents, runs the
    original revert against matchers rewritten to hit standins, then
    updates the largefiles whose standins the revert changed.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        # Bring standins in sync with the working copy: refresh modified
        # largefiles' standins, drop standins of deleted largefiles.
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        # Snapshot standin state so we can see what the revert changed.
        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    # Known largefile: match its standin instead.
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate[f] == b'r':
                    # Standin exists only in the working copy, or the
                    # largefile is marked removed: skip entirely.
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, parents, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )
960
952
961
953
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrap 'hg pull' to optionally cache largefiles for pulled revisions.

    After the normal pull, the revisions named by --lfrev (plus
    ``pulled()`` when --all-largefiles is given) have their largefiles
    downloaded into the local cache.
    """
    revsprepull = len(repo)
    source = source or b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)

    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append(b'pulled()')

    # Only fetch largefiles when something was actually pulled.
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                cached, missing = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # The attribute only makes sense while pulled() can resolve.
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result
1004
996
1005
997
@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Wrap 'hg push' to carry the resolved --lfrev revisions in opargs."""
    revspecs = kwargs.pop('lfrev', None)
    if revspecs:
        extraargs = kwargs.setdefault('opargs', {})
        extraargs[b'lfrevs'] = scmutil.revrange(repo, revspecs)
    return orig(ui, repo, *args, **kwargs)
1025
1017
1026
1018
@eh.wrapfunction(exchange, b'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Wrap exchange.pushoperation() to attach the --lfrev revisions.

    The 'lfrevs' keyword is stripped before delegating to the original
    constructor and stored on the result as ``pushop.lfrevs``.
    """
    revs = kwargs.pop('lfrevs', None)
    op = orig(*args, **kwargs)
    op.lfrevs = revs
    return op
1034
1026
1035
1027
@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that have just been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull --lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull --lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is set by overridepull() only while a --lfrev pull
    # is in progress; outside of that the predicate is meaningless.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])
1061
1053
1062
1054
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    """Wrap 'hg clone' to reject --all-largefiles for remote destinations."""
    # Resolve the destination the same way clone itself would, only to
    # decide whether it is local; the original 'dest' is passed through.
    resolved = hg.defaultdest(source) if dest is None else dest
    if opts.get('all_largefiles') and not hg.islocal(resolved):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % resolved
        )

    return orig(ui, source, dest, **opts)
1085
1077
1086
1078
1087 @eh.wrapfunction(hg, b'clone')
1079 @eh.wrapfunction(hg, b'clone')
1088 def hgclone(orig, ui, opts, *args, **kwargs):
1080 def hgclone(orig, ui, opts, *args, **kwargs):
1089 result = orig(ui, opts, *args, **kwargs)
1081 result = orig(ui, opts, *args, **kwargs)
1090
1082
1091 if result is not None:
1083 if result is not None:
1092 sourcerepo, destrepo = result
1084 sourcerepo, destrepo = result
1093 repo = destrepo.local()
1085 repo = destrepo.local()
1094
1086
1095 # When cloning to a remote repo (like through SSH), no repo is available
1087 # When cloning to a remote repo (like through SSH), no repo is available
1096 # from the peer. Therefore the largefiles can't be downloaded and the
1088 # from the peer. Therefore the largefiles can't be downloaded and the
1097 # hgrc can't be updated.
1089 # hgrc can't be updated.
1098 if not repo:
1090 if not repo:
1099 return result
1091 return result
1100
1092
1101 # Caching is implicitly limited to 'rev' option, since the dest repo was
1093 # Caching is implicitly limited to 'rev' option, since the dest repo was
1102 # truncated at that point. The user may expect a download count with
1094 # truncated at that point. The user may expect a download count with
1103 # this option, so attempt whether or not this is a largefile repo.
1095 # this option, so attempt whether or not this is a largefile repo.
1104 if opts.get(b'all_largefiles'):
1096 if opts.get(b'all_largefiles'):
1105 success, missing = lfcommands.downloadlfiles(ui, repo, None)
1097 success, missing = lfcommands.downloadlfiles(ui, repo, None)
1106
1098
1107 if missing != 0:
1099 if missing != 0:
1108 return None
1100 return None
1109
1101
1110 return result
1102 return result
1111
1103
1112
1104
1113 @eh.wrapcommand(b'rebase', extension=b'rebase')
1105 @eh.wrapcommand(b'rebase', extension=b'rebase')
1114 def overriderebase(orig, ui, repo, **opts):
1106 def overriderebase(orig, ui, repo, **opts):
1115 if not util.safehasattr(repo, b'_largefilesenabled'):
1107 if not util.safehasattr(repo, b'_largefilesenabled'):
1116 return orig(ui, repo, **opts)
1108 return orig(ui, repo, **opts)
1117
1109
1118 resuming = opts.get('continue')
1110 resuming = opts.get('continue')
1119 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1111 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1120 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1112 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1121 try:
1113 try:
1122 return orig(ui, repo, **opts)
1114 return orig(ui, repo, **opts)
1123 finally:
1115 finally:
1124 repo._lfstatuswriters.pop()
1116 repo._lfstatuswriters.pop()
1125 repo._lfcommithooks.pop()
1117 repo._lfcommithooks.pop()
1126
1118
1127
1119
1128 @eh.wrapcommand(b'archive')
1120 @eh.wrapcommand(b'archive')
1129 def overridearchivecmd(orig, ui, repo, dest, **opts):
1121 def overridearchivecmd(orig, ui, repo, dest, **opts):
1130 with lfstatus(repo.unfiltered()):
1122 with lfstatus(repo.unfiltered()):
1131 return orig(ui, repo.unfiltered(), dest, **opts)
1123 return orig(ui, repo.unfiltered(), dest, **opts)
1132
1124
1133
1125
1134 @eh.wrapfunction(webcommands, b'archive')
1126 @eh.wrapfunction(webcommands, b'archive')
1135 def hgwebarchive(orig, web):
1127 def hgwebarchive(orig, web):
1136 with lfstatus(web.repo):
1128 with lfstatus(web.repo):
1137 return orig(web)
1129 return orig(web)
1138
1130
1139
1131
1140 @eh.wrapfunction(archival, b'archive')
1132 @eh.wrapfunction(archival, b'archive')
1141 def overridearchive(
1133 def overridearchive(
1142 orig,
1134 orig,
1143 repo,
1135 repo,
1144 dest,
1136 dest,
1145 node,
1137 node,
1146 kind,
1138 kind,
1147 decode=True,
1139 decode=True,
1148 match=None,
1140 match=None,
1149 prefix=b'',
1141 prefix=b'',
1150 mtime=None,
1142 mtime=None,
1151 subrepos=None,
1143 subrepos=None,
1152 ):
1144 ):
1153 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1145 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1154 # unfiltered repo's attr, so check that as well.
1146 # unfiltered repo's attr, so check that as well.
1155 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1147 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1156 return orig(
1148 return orig(
1157 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1149 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1158 )
1150 )
1159
1151
1160 # No need to lock because we are only reading history and
1152 # No need to lock because we are only reading history and
1161 # largefile caches, neither of which are modified.
1153 # largefile caches, neither of which are modified.
1162 if node is not None:
1154 if node is not None:
1163 lfcommands.cachelfiles(repo.ui, repo, node)
1155 lfcommands.cachelfiles(repo.ui, repo, node)
1164
1156
1165 if kind not in archival.archivers:
1157 if kind not in archival.archivers:
1166 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1158 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1167
1159
1168 ctx = repo[node]
1160 ctx = repo[node]
1169
1161
1170 if kind == b'files':
1162 if kind == b'files':
1171 if prefix:
1163 if prefix:
1172 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1164 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1173 else:
1165 else:
1174 prefix = archival.tidyprefix(dest, kind, prefix)
1166 prefix = archival.tidyprefix(dest, kind, prefix)
1175
1167
1176 def write(name, mode, islink, getdata):
1168 def write(name, mode, islink, getdata):
1177 if match and not match(name):
1169 if match and not match(name):
1178 return
1170 return
1179 data = getdata()
1171 data = getdata()
1180 if decode:
1172 if decode:
1181 data = repo.wwritedata(name, data)
1173 data = repo.wwritedata(name, data)
1182 archiver.addfile(prefix + name, mode, islink, data)
1174 archiver.addfile(prefix + name, mode, islink, data)
1183
1175
1184 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1176 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1185
1177
1186 if repo.ui.configbool(b"ui", b"archivemeta"):
1178 if repo.ui.configbool(b"ui", b"archivemeta"):
1187 write(
1179 write(
1188 b'.hg_archival.txt',
1180 b'.hg_archival.txt',
1189 0o644,
1181 0o644,
1190 False,
1182 False,
1191 lambda: archival.buildmetadata(ctx),
1183 lambda: archival.buildmetadata(ctx),
1192 )
1184 )
1193
1185
1194 for f in ctx:
1186 for f in ctx:
1195 ff = ctx.flags(f)
1187 ff = ctx.flags(f)
1196 getdata = ctx[f].data
1188 getdata = ctx[f].data
1197 lfile = lfutil.splitstandin(f)
1189 lfile = lfutil.splitstandin(f)
1198 if lfile is not None:
1190 if lfile is not None:
1199 if node is not None:
1191 if node is not None:
1200 path = lfutil.findfile(repo, getdata().strip())
1192 path = lfutil.findfile(repo, getdata().strip())
1201
1193
1202 if path is None:
1194 if path is None:
1203 raise error.Abort(
1195 raise error.Abort(
1204 _(
1196 _(
1205 b'largefile %s not found in repo store or system cache'
1197 b'largefile %s not found in repo store or system cache'
1206 )
1198 )
1207 % lfile
1199 % lfile
1208 )
1200 )
1209 else:
1201 else:
1210 path = lfile
1202 path = lfile
1211
1203
1212 f = lfile
1204 f = lfile
1213
1205
1214 getdata = lambda: util.readfile(path)
1206 getdata = lambda: util.readfile(path)
1215 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1207 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1216
1208
1217 if subrepos:
1209 if subrepos:
1218 for subpath in sorted(ctx.substate):
1210 for subpath in sorted(ctx.substate):
1219 sub = ctx.workingsub(subpath)
1211 sub = ctx.workingsub(subpath)
1220 submatch = matchmod.subdirmatcher(subpath, match)
1212 submatch = matchmod.subdirmatcher(subpath, match)
1221 subprefix = prefix + subpath + b'/'
1213 subprefix = prefix + subpath + b'/'
1222
1214
1223 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1215 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1224 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1216 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1225 # allow only hgsubrepos to set this, instead of the current scheme
1217 # allow only hgsubrepos to set this, instead of the current scheme
1226 # where the parent sets this for the child.
1218 # where the parent sets this for the child.
1227 with (
1219 with (
1228 util.safehasattr(sub, '_repo')
1220 util.safehasattr(sub, '_repo')
1229 and lfstatus(sub._repo)
1221 and lfstatus(sub._repo)
1230 or util.nullcontextmanager()
1222 or util.nullcontextmanager()
1231 ):
1223 ):
1232 sub.archive(archiver, subprefix, submatch)
1224 sub.archive(archiver, subprefix, submatch)
1233
1225
1234 archiver.done()
1226 archiver.done()
1235
1227
1236
1228
1237 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1229 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1238 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1230 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1239 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1231 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1240 if not lfenabled or not repo._repo.lfstatus:
1232 if not lfenabled or not repo._repo.lfstatus:
1241 return orig(repo, archiver, prefix, match, decode)
1233 return orig(repo, archiver, prefix, match, decode)
1242
1234
1243 repo._get(repo._state + (b'hg',))
1235 repo._get(repo._state + (b'hg',))
1244 rev = repo._state[1]
1236 rev = repo._state[1]
1245 ctx = repo._repo[rev]
1237 ctx = repo._repo[rev]
1246
1238
1247 if ctx.node() is not None:
1239 if ctx.node() is not None:
1248 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1240 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1249
1241
1250 def write(name, mode, islink, getdata):
1242 def write(name, mode, islink, getdata):
1251 # At this point, the standin has been replaced with the largefile name,
1243 # At this point, the standin has been replaced with the largefile name,
1252 # so the normal matcher works here without the lfutil variants.
1244 # so the normal matcher works here without the lfutil variants.
1253 if match and not match(f):
1245 if match and not match(f):
1254 return
1246 return
1255 data = getdata()
1247 data = getdata()
1256 if decode:
1248 if decode:
1257 data = repo._repo.wwritedata(name, data)
1249 data = repo._repo.wwritedata(name, data)
1258
1250
1259 archiver.addfile(prefix + name, mode, islink, data)
1251 archiver.addfile(prefix + name, mode, islink, data)
1260
1252
1261 for f in ctx:
1253 for f in ctx:
1262 ff = ctx.flags(f)
1254 ff = ctx.flags(f)
1263 getdata = ctx[f].data
1255 getdata = ctx[f].data
1264 lfile = lfutil.splitstandin(f)
1256 lfile = lfutil.splitstandin(f)
1265 if lfile is not None:
1257 if lfile is not None:
1266 if ctx.node() is not None:
1258 if ctx.node() is not None:
1267 path = lfutil.findfile(repo._repo, getdata().strip())
1259 path = lfutil.findfile(repo._repo, getdata().strip())
1268
1260
1269 if path is None:
1261 if path is None:
1270 raise error.Abort(
1262 raise error.Abort(
1271 _(
1263 _(
1272 b'largefile %s not found in repo store or system cache'
1264 b'largefile %s not found in repo store or system cache'
1273 )
1265 )
1274 % lfile
1266 % lfile
1275 )
1267 )
1276 else:
1268 else:
1277 path = lfile
1269 path = lfile
1278
1270
1279 f = lfile
1271 f = lfile
1280
1272
1281 getdata = lambda: util.readfile(os.path.join(prefix, path))
1273 getdata = lambda: util.readfile(os.path.join(prefix, path))
1282
1274
1283 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1275 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1284
1276
1285 for subpath in sorted(ctx.substate):
1277 for subpath in sorted(ctx.substate):
1286 sub = ctx.workingsub(subpath)
1278 sub = ctx.workingsub(subpath)
1287 submatch = matchmod.subdirmatcher(subpath, match)
1279 submatch = matchmod.subdirmatcher(subpath, match)
1288 subprefix = prefix + subpath + b'/'
1280 subprefix = prefix + subpath + b'/'
1289 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1281 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1290 # infer and possibly set lfstatus at the top of this function. That
1282 # infer and possibly set lfstatus at the top of this function. That
1291 # would allow only hgsubrepos to set this, instead of the current scheme
1283 # would allow only hgsubrepos to set this, instead of the current scheme
1292 # where the parent sets this for the child.
1284 # where the parent sets this for the child.
1293 with (
1285 with (
1294 util.safehasattr(sub, '_repo')
1286 util.safehasattr(sub, '_repo')
1295 and lfstatus(sub._repo)
1287 and lfstatus(sub._repo)
1296 or util.nullcontextmanager()
1288 or util.nullcontextmanager()
1297 ):
1289 ):
1298 sub.archive(archiver, subprefix, submatch, decode)
1290 sub.archive(archiver, subprefix, submatch, decode)
1299
1291
1300
1292
1301 # If a largefile is modified, the change is not reflected in its
1293 # If a largefile is modified, the change is not reflected in its
1302 # standin until a commit. cmdutil.bailifchanged() raises an exception
1294 # standin until a commit. cmdutil.bailifchanged() raises an exception
1303 # if the repo has uncommitted changes. Wrap it to also check if
1295 # if the repo has uncommitted changes. Wrap it to also check if
1304 # largefiles were changed. This is used by bisect, backout and fetch.
1296 # largefiles were changed. This is used by bisect, backout and fetch.
1305 @eh.wrapfunction(cmdutil, b'bailifchanged')
1297 @eh.wrapfunction(cmdutil, b'bailifchanged')
1306 def overridebailifchanged(orig, repo, *args, **kwargs):
1298 def overridebailifchanged(orig, repo, *args, **kwargs):
1307 orig(repo, *args, **kwargs)
1299 orig(repo, *args, **kwargs)
1308 with lfstatus(repo):
1300 with lfstatus(repo):
1309 s = repo.status()
1301 s = repo.status()
1310 if s.modified or s.added or s.removed or s.deleted:
1302 if s.modified or s.added or s.removed or s.deleted:
1311 raise error.Abort(_(b'uncommitted changes'))
1303 raise error.Abort(_(b'uncommitted changes'))
1312
1304
1313
1305
1314 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1306 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1315 def postcommitstatus(orig, repo, *args, **kwargs):
1307 def postcommitstatus(orig, repo, *args, **kwargs):
1316 with lfstatus(repo):
1308 with lfstatus(repo):
1317 return orig(repo, *args, **kwargs)
1309 return orig(repo, *args, **kwargs)
1318
1310
1319
1311
1320 @eh.wrapfunction(cmdutil, b'forget')
1312 @eh.wrapfunction(cmdutil, b'forget')
1321 def cmdutilforget(
1313 def cmdutilforget(
1322 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1314 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1323 ):
1315 ):
1324 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1316 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1325 bad, forgot = orig(
1317 bad, forgot = orig(
1326 ui,
1318 ui,
1327 repo,
1319 repo,
1328 normalmatcher,
1320 normalmatcher,
1329 prefix,
1321 prefix,
1330 uipathfn,
1322 uipathfn,
1331 explicitonly,
1323 explicitonly,
1332 dryrun,
1324 dryrun,
1333 interactive,
1325 interactive,
1334 )
1326 )
1335 m = composelargefilematcher(match, repo[None].manifest())
1327 m = composelargefilematcher(match, repo[None].manifest())
1336
1328
1337 with lfstatus(repo):
1329 with lfstatus(repo):
1338 s = repo.status(match=m, clean=True)
1330 s = repo.status(match=m, clean=True)
1339 manifest = repo[None].manifest()
1331 manifest = repo[None].manifest()
1340 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1332 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1341 forget = [f for f in forget if lfutil.standin(f) in manifest]
1333 forget = [f for f in forget if lfutil.standin(f) in manifest]
1342
1334
1343 for f in forget:
1335 for f in forget:
1344 fstandin = lfutil.standin(f)
1336 fstandin = lfutil.standin(f)
1345 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1337 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1346 ui.warn(
1338 ui.warn(
1347 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1339 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1348 )
1340 )
1349 bad.append(f)
1341 bad.append(f)
1350
1342
1351 for f in forget:
1343 for f in forget:
1352 if ui.verbose or not m.exact(f):
1344 if ui.verbose or not m.exact(f):
1353 ui.status(_(b'removing %s\n') % uipathfn(f))
1345 ui.status(_(b'removing %s\n') % uipathfn(f))
1354
1346
1355 # Need to lock because standin files are deleted then removed from the
1347 # Need to lock because standin files are deleted then removed from the
1356 # repository and we could race in-between.
1348 # repository and we could race in-between.
1357 with repo.wlock():
1349 with repo.wlock():
1358 lfdirstate = lfutil.openlfdirstate(ui, repo)
1350 lfdirstate = lfutil.openlfdirstate(ui, repo)
1359 for f in forget:
1351 for f in forget:
1360 if lfdirstate[f] == b'a':
1352 if lfdirstate[f] == b'a':
1361 lfdirstate.drop(f)
1353 lfdirstate.drop(f)
1362 else:
1354 else:
1363 lfdirstate.remove(f)
1355 lfdirstate.remove(f)
1364 lfdirstate.write()
1356 lfdirstate.write()
1365 standins = [lfutil.standin(f) for f in forget]
1357 standins = [lfutil.standin(f) for f in forget]
1366 for f in standins:
1358 for f in standins:
1367 repo.wvfs.unlinkpath(f, ignoremissing=True)
1359 repo.wvfs.unlinkpath(f, ignoremissing=True)
1368 rejected = repo[None].forget(standins)
1360 rejected = repo[None].forget(standins)
1369
1361
1370 bad.extend(f for f in rejected if f in m.files())
1362 bad.extend(f for f in rejected if f in m.files())
1371 forgot.extend(f for f in forget if f not in rejected)
1363 forgot.extend(f for f in forget if f not in rejected)
1372 return bad, forgot
1364 return bad, forgot
1373
1365
1374
1366
1375 def _getoutgoings(repo, other, missing, addfunc):
1367 def _getoutgoings(repo, other, missing, addfunc):
1376 """get pairs of filename and largefile hash in outgoing revisions
1368 """get pairs of filename and largefile hash in outgoing revisions
1377 in 'missing'.
1369 in 'missing'.
1378
1370
1379 largefiles already existing on 'other' repository are ignored.
1371 largefiles already existing on 'other' repository are ignored.
1380
1372
1381 'addfunc' is invoked with each unique pairs of filename and
1373 'addfunc' is invoked with each unique pairs of filename and
1382 largefile hash value.
1374 largefile hash value.
1383 """
1375 """
1384 knowns = set()
1376 knowns = set()
1385 lfhashes = set()
1377 lfhashes = set()
1386
1378
1387 def dedup(fn, lfhash):
1379 def dedup(fn, lfhash):
1388 k = (fn, lfhash)
1380 k = (fn, lfhash)
1389 if k not in knowns:
1381 if k not in knowns:
1390 knowns.add(k)
1382 knowns.add(k)
1391 lfhashes.add(lfhash)
1383 lfhashes.add(lfhash)
1392
1384
1393 lfutil.getlfilestoupload(repo, missing, dedup)
1385 lfutil.getlfilestoupload(repo, missing, dedup)
1394 if lfhashes:
1386 if lfhashes:
1395 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1387 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1396 for fn, lfhash in knowns:
1388 for fn, lfhash in knowns:
1397 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1389 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1398 addfunc(fn, lfhash)
1390 addfunc(fn, lfhash)
1399
1391
1400
1392
1401 def outgoinghook(ui, repo, other, opts, missing):
1393 def outgoinghook(ui, repo, other, opts, missing):
1402 if opts.pop(b'large', None):
1394 if opts.pop(b'large', None):
1403 lfhashes = set()
1395 lfhashes = set()
1404 if ui.debugflag:
1396 if ui.debugflag:
1405 toupload = {}
1397 toupload = {}
1406
1398
1407 def addfunc(fn, lfhash):
1399 def addfunc(fn, lfhash):
1408 if fn not in toupload:
1400 if fn not in toupload:
1409 toupload[fn] = []
1401 toupload[fn] = []
1410 toupload[fn].append(lfhash)
1402 toupload[fn].append(lfhash)
1411 lfhashes.add(lfhash)
1403 lfhashes.add(lfhash)
1412
1404
1413 def showhashes(fn):
1405 def showhashes(fn):
1414 for lfhash in sorted(toupload[fn]):
1406 for lfhash in sorted(toupload[fn]):
1415 ui.debug(b' %s\n' % lfhash)
1407 ui.debug(b' %s\n' % lfhash)
1416
1408
1417 else:
1409 else:
1418 toupload = set()
1410 toupload = set()
1419
1411
1420 def addfunc(fn, lfhash):
1412 def addfunc(fn, lfhash):
1421 toupload.add(fn)
1413 toupload.add(fn)
1422 lfhashes.add(lfhash)
1414 lfhashes.add(lfhash)
1423
1415
1424 def showhashes(fn):
1416 def showhashes(fn):
1425 pass
1417 pass
1426
1418
1427 _getoutgoings(repo, other, missing, addfunc)
1419 _getoutgoings(repo, other, missing, addfunc)
1428
1420
1429 if not toupload:
1421 if not toupload:
1430 ui.status(_(b'largefiles: no files to upload\n'))
1422 ui.status(_(b'largefiles: no files to upload\n'))
1431 else:
1423 else:
1432 ui.status(
1424 ui.status(
1433 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1425 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1434 )
1426 )
1435 for file in sorted(toupload):
1427 for file in sorted(toupload):
1436 ui.status(lfutil.splitstandin(file) + b'\n')
1428 ui.status(lfutil.splitstandin(file) + b'\n')
1437 showhashes(file)
1429 showhashes(file)
1438 ui.status(b'\n')
1430 ui.status(b'\n')
1439
1431
1440
1432
1441 @eh.wrapcommand(
1433 @eh.wrapcommand(
1442 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1434 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1443 )
1435 )
1444 def _outgoingcmd(orig, *args, **kwargs):
1436 def _outgoingcmd(orig, *args, **kwargs):
1445 # Nothing to do here other than add the extra help option- the hook above
1437 # Nothing to do here other than add the extra help option- the hook above
1446 # processes it.
1438 # processes it.
1447 return orig(*args, **kwargs)
1439 return orig(*args, **kwargs)
1448
1440
1449
1441
1450 def summaryremotehook(ui, repo, opts, changes):
1442 def summaryremotehook(ui, repo, opts, changes):
1451 largeopt = opts.get(b'large', False)
1443 largeopt = opts.get(b'large', False)
1452 if changes is None:
1444 if changes is None:
1453 if largeopt:
1445 if largeopt:
1454 return (False, True) # only outgoing check is needed
1446 return (False, True) # only outgoing check is needed
1455 else:
1447 else:
1456 return (False, False)
1448 return (False, False)
1457 elif largeopt:
1449 elif largeopt:
1458 url, branch, peer, outgoing = changes[1]
1450 url, branch, peer, outgoing = changes[1]
1459 if peer is None:
1451 if peer is None:
1460 # i18n: column positioning for "hg summary"
1452 # i18n: column positioning for "hg summary"
1461 ui.status(_(b'largefiles: (no remote repo)\n'))
1453 ui.status(_(b'largefiles: (no remote repo)\n'))
1462 return
1454 return
1463
1455
1464 toupload = set()
1456 toupload = set()
1465 lfhashes = set()
1457 lfhashes = set()
1466
1458
1467 def addfunc(fn, lfhash):
1459 def addfunc(fn, lfhash):
1468 toupload.add(fn)
1460 toupload.add(fn)
1469 lfhashes.add(lfhash)
1461 lfhashes.add(lfhash)
1470
1462
1471 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1463 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1472
1464
1473 if not toupload:
1465 if not toupload:
1474 # i18n: column positioning for "hg summary"
1466 # i18n: column positioning for "hg summary"
1475 ui.status(_(b'largefiles: (no files to upload)\n'))
1467 ui.status(_(b'largefiles: (no files to upload)\n'))
1476 else:
1468 else:
1477 # i18n: column positioning for "hg summary"
1469 # i18n: column positioning for "hg summary"
1478 ui.status(
1470 ui.status(
1479 _(b'largefiles: %d entities for %d files to upload\n')
1471 _(b'largefiles: %d entities for %d files to upload\n')
1480 % (len(lfhashes), len(toupload))
1472 % (len(lfhashes), len(toupload))
1481 )
1473 )
1482
1474
1483
1475
1484 @eh.wrapcommand(
1476 @eh.wrapcommand(
1485 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1477 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1486 )
1478 )
1487 def overridesummary(orig, ui, repo, *pats, **opts):
1479 def overridesummary(orig, ui, repo, *pats, **opts):
1488 with lfstatus(repo):
1480 with lfstatus(repo):
1489 orig(ui, repo, *pats, **opts)
1481 orig(ui, repo, *pats, **opts)
1490
1482
1491
1483
1492 @eh.wrapfunction(scmutil, b'addremove')
1484 @eh.wrapfunction(scmutil, b'addremove')
1493 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1485 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1494 if opts is None:
1486 if opts is None:
1495 opts = {}
1487 opts = {}
1496 if not lfutil.islfilesrepo(repo):
1488 if not lfutil.islfilesrepo(repo):
1497 return orig(repo, matcher, prefix, uipathfn, opts)
1489 return orig(repo, matcher, prefix, uipathfn, opts)
1498 # Get the list of missing largefiles so we can remove them
1490 # Get the list of missing largefiles so we can remove them
1499 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1491 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1500 unsure, s = lfdirstate.status(
1492 unsure, s = lfdirstate.status(
1501 matchmod.always(),
1493 matchmod.always(),
1502 subrepos=[],
1494 subrepos=[],
1503 ignored=False,
1495 ignored=False,
1504 clean=False,
1496 clean=False,
1505 unknown=False,
1497 unknown=False,
1506 )
1498 )
1507
1499
1508 # Call into the normal remove code, but the removing of the standin, we want
1500 # Call into the normal remove code, but the removing of the standin, we want
1509 # to have handled by original addremove. Monkey patching here makes sure
1501 # to have handled by original addremove. Monkey patching here makes sure
1510 # we don't remove the standin in the largefiles code, preventing a very
1502 # we don't remove the standin in the largefiles code, preventing a very
1511 # confused state later.
1503 # confused state later.
1512 if s.deleted:
1504 if s.deleted:
1513 m = copy.copy(matcher)
1505 m = copy.copy(matcher)
1514
1506
1515 # The m._files and m._map attributes are not changed to the deleted list
1507 # The m._files and m._map attributes are not changed to the deleted list
1516 # because that affects the m.exact() test, which in turn governs whether
1508 # because that affects the m.exact() test, which in turn governs whether
1517 # or not the file name is printed, and how. Simply limit the original
1509 # or not the file name is printed, and how. Simply limit the original
1518 # matches to those in the deleted status list.
1510 # matches to those in the deleted status list.
1519 matchfn = m.matchfn
1511 matchfn = m.matchfn
1520 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1512 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1521
1513
1522 removelargefiles(
1514 removelargefiles(
1523 repo.ui,
1515 repo.ui,
1524 repo,
1516 repo,
1525 True,
1517 True,
1526 m,
1518 m,
1527 uipathfn,
1519 uipathfn,
1528 opts.get(b'dry_run'),
1520 opts.get(b'dry_run'),
1529 **pycompat.strkwargs(opts)
1521 **pycompat.strkwargs(opts)
1530 )
1522 )
1531 # Call into the normal add code, and any files that *should* be added as
1523 # Call into the normal add code, and any files that *should* be added as
1532 # largefiles will be
1524 # largefiles will be
1533 added, bad = addlargefiles(
1525 added, bad = addlargefiles(
1534 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1526 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1535 )
1527 )
1536 # Now that we've handled largefiles, hand off to the original addremove
1528 # Now that we've handled largefiles, hand off to the original addremove
1537 # function to take care of the rest. Make sure it doesn't do anything with
1529 # function to take care of the rest. Make sure it doesn't do anything with
1538 # largefiles by passing a matcher that will ignore them.
1530 # largefiles by passing a matcher that will ignore them.
1539 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1531 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1540 return orig(repo, matcher, prefix, uipathfn, opts)
1532 return orig(repo, matcher, prefix, uipathfn, opts)
1541
1533
1542
1534
1543 # Calling purge with --all will cause the largefiles to be deleted.
1535 # Calling purge with --all will cause the largefiles to be deleted.
1544 # Override repo.status to prevent this from happening.
1536 # Override repo.status to prevent this from happening.
1545 @eh.wrapcommand(b'purge', extension=b'purge')
1537 @eh.wrapcommand(b'purge', extension=b'purge')
1546 def overridepurge(orig, ui, repo, *dirs, **opts):
1538 def overridepurge(orig, ui, repo, *dirs, **opts):
1547 # XXX Monkey patching a repoview will not work. The assigned attribute will
1539 # XXX Monkey patching a repoview will not work. The assigned attribute will
1548 # be set on the unfiltered repo, but we will only lookup attributes in the
1540 # be set on the unfiltered repo, but we will only lookup attributes in the
1549 # unfiltered repo if the lookup in the repoview object itself fails. As the
1541 # unfiltered repo if the lookup in the repoview object itself fails. As the
1550 # monkey patched method exists on the repoview class the lookup will not
1542 # monkey patched method exists on the repoview class the lookup will not
1551 # fail. As a result, the original version will shadow the monkey patched
1543 # fail. As a result, the original version will shadow the monkey patched
1552 # one, defeating the monkey patch.
1544 # one, defeating the monkey patch.
1553 #
1545 #
1554 # As a work around we use an unfiltered repo here. We should do something
1546 # As a work around we use an unfiltered repo here. We should do something
1555 # cleaner instead.
1547 # cleaner instead.
1556 repo = repo.unfiltered()
1548 repo = repo.unfiltered()
1557 oldstatus = repo.status
1549 oldstatus = repo.status
1558
1550
1559 def overridestatus(
1551 def overridestatus(
1560 node1=b'.',
1552 node1=b'.',
1561 node2=None,
1553 node2=None,
1562 match=None,
1554 match=None,
1563 ignored=False,
1555 ignored=False,
1564 clean=False,
1556 clean=False,
1565 unknown=False,
1557 unknown=False,
1566 listsubrepos=False,
1558 listsubrepos=False,
1567 ):
1559 ):
1568 r = oldstatus(
1560 r = oldstatus(
1569 node1, node2, match, ignored, clean, unknown, listsubrepos
1561 node1, node2, match, ignored, clean, unknown, listsubrepos
1570 )
1562 )
1571 lfdirstate = lfutil.openlfdirstate(ui, repo)
1563 lfdirstate = lfutil.openlfdirstate(ui, repo)
1572 unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
1564 unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
1573 ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
1565 ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
1574 return scmutil.status(
1566 return scmutil.status(
1575 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1567 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1576 )
1568 )
1577
1569
1578 repo.status = overridestatus
1570 repo.status = overridestatus
1579 orig(ui, repo, *dirs, **opts)
1571 orig(ui, repo, *dirs, **opts)
1580 repo.status = oldstatus
1572 repo.status = oldstatus
1581
1573
1582
1574
@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    """Wrap 'hg rollback' to restore largefile standins after the rollback.

    After the wrapped rollback runs, the dirstate may reference standin
    files whose on-disk content no longer matches the rolled-back parent.
    This wrapper rewrites or removes standins and resynchronizes the
    largefiles dirstate so it agrees with the (new) working directory
    parent.
    """
    with repo.wlock():
        before = repo.dirstate.parents()
        # Standins tracked before the rollback that are not marked removed;
        # any of these not re-discovered afterwards are orphans to delete.
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == b'r':
                    # standin is marked removed: drop it from the working dir
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore standin content from the new parent revision
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # Rebuild the largefiles dirstate: sync every listed largefile and
        # drop entries that no longer correspond to a tracked largefile.
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result
1623
1615
1624
1616
@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Wrap 'hg transplant' so largefile commit hooks run automatically.

    Pushes an automated commit hook (aware of --continue resumption) and a
    no-op status writer for the duration of the wrapped command, and always
    pops them again afterwards.
    """
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        # The finally-block runs before this value is handed back.
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1636
1628
1637
1629
@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """Wrap 'hg cat' so it outputs largefile contents, not standin hashes.

    The matcher is patched so that a pattern naming a largefile also matches
    its standin; when a matched file turns out to be a standin, the actual
    largefile content is fetched (from the user cache, downloading it into
    the cache first if needed) and written out instead of the standin.

    Returns 0 if at least one file was written, 1 otherwise (matching the
    behavior of the wrapped command).
    """
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    # Largefile names whose standins matched; suppress "no such file"
    # complaints for these.
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        # Accept a file if either it matches directly, or it is a standin
        # whose corresponding largefile name matches.
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        # 'f not in notbad' (was 'not f in notbad'): PEP 8 membership idiom.
        if f not in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        # Always descend into the standin directory, and also descend into
        # directories whose standin counterpart would be visited.
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                # renamed from 'hash' to avoid shadowing the builtin
                lfhash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, lfhash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, lfhash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, lfhash)
                with open(path, b"rb") as fpin:
                    # stream the largefile in chunks to bound memory use
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1708
1700
1709
1701
@eh.wrapfunction(merge, b'update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    """Wrap merge.update to keep largefile standins in sync across updates.

    Before the wrapped update runs, standins are refreshed from the current
    working-directory largefiles and all clean largefiles are marked dirty
    (so an interrupted update cannot leave lfdirstate claiming stale
    cleanliness).  After the update, only the largefiles whose standins
    actually changed are refreshed on disk.
    """
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=True,
            unknown=False,
        )
        oldclean = set(s.clean)
        pctx = repo[b'.']
        dctx = repo[node]
        # Re-hash possibly-modified largefiles and rewrite their standins;
        # a file whose hash still matches the parent's standin is in fact
        # clean, so re-add it to oldclean.
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(
                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
            )
            if standin in pctx and lfhash == lfutil.readasstandin(
                pctx[standin]
            ):
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.normallookup(lfile)
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)
        # Make sure the merge runs on disk, not in-memory. largefiles is not a
        # good candidate for in-memory merge (large files, custom dirstate,
        # matcher usage).
        kwargs['wc'] = repo[None]
        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.normal(lfile)
        lfdirstate.write()

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

        return result
1792
1784
1793
1785
@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """Wrap scmutil.marktouched to also refresh any touched largefiles.

    Any touched standin is mapped back to its largefile name, and those
    largefiles are updated quietly (no status output).
    """
    result = orig(repo, files, *args, **kwargs)

    touched = [
        lf for lf in map(lfutil.splitstandin, files) if lf is not None
    ]
    if touched:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=touched,
            printmessage=False,
            normallookup=True,
        )

    return result
1813
1805
1814
1806
@eh.wrapfunction(upgrade, b'preservedrequirements')
@eh.wrapfunction(upgrade, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Ensure 'largefiles' survives as a requirement across repo upgrades."""
    requirements = orig(repo)
    if b'largefiles' in repo.requirements:
        requirements.add(b'largefiles')
    return requirements
1822
1814
1823
1815
# URL scheme prefix used to address a single largefile by its identifier.
_lfscheme = b'largefile://'
1825
1817
1826
1818
@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None):
    """Wrap url.open so 'largefile://<id>' URLs resolve via the lf store.

    Any other URL is passed straight through to the wrapped opener.
    """
    if not url_.startswith(_lfscheme):
        return orig(ui, url_, data=data)
    if data:
        # POST bodies make no sense for a content-addressed largefile fetch.
        raise error.ProgrammingError(
            b"cannot use data on a 'largefile://' url"
        )
    return storefactory.getlfile(ui, url_[len(_lfscheme) :])
@@ -1,2305 +1,2282 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import stat
12 import stat
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 modifiednodeid,
18 modifiednodeid,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from .thirdparty import attr
22 from .thirdparty import attr
23 from . import (
23 from . import (
24 copies,
24 copies,
25 encoding,
25 encoding,
26 error,
26 error,
27 filemerge,
27 filemerge,
28 match as matchmod,
28 match as matchmod,
29 mergestate as mergestatemod,
29 mergestate as mergestatemod,
30 obsutil,
30 obsutil,
31 pathutil,
31 pathutil,
32 pycompat,
32 pycompat,
33 scmutil,
33 scmutil,
34 subrepoutil,
34 subrepoutil,
35 util,
35 util,
36 worker,
36 worker,
37 )
37 )
38
38
39 _pack = struct.pack
39 _pack = struct.pack
40 _unpack = struct.unpack
40 _unpack = struct.unpack
41
41
42
42
43 def _getcheckunknownconfig(repo, section, name):
43 def _getcheckunknownconfig(repo, section, name):
44 config = repo.ui.config(section, name)
44 config = repo.ui.config(section, name)
45 valid = [b'abort', b'ignore', b'warn']
45 valid = [b'abort', b'ignore', b'warn']
46 if config not in valid:
46 if config not in valid:
47 validstr = b', '.join([b"'" + v + b"'" for v in valid])
47 validstr = b', '.join([b"'" + v + b"'" for v in valid])
48 raise error.ConfigError(
48 raise error.ConfigError(
49 _(b"%s.%s not valid ('%s' is none of %s)")
49 _(b"%s.%s not valid ('%s' is none of %s)")
50 % (section, name, config, validstr)
50 % (section, name, config, validstr)
51 )
51 )
52 return config
52 return config
53
53
54
54
55 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
55 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
56 if wctx.isinmemory():
56 if wctx.isinmemory():
57 # Nothing to do in IMM because nothing in the "working copy" can be an
57 # Nothing to do in IMM because nothing in the "working copy" can be an
58 # unknown file.
58 # unknown file.
59 #
59 #
60 # Note that we should bail out here, not in ``_checkunknownfiles()``,
60 # Note that we should bail out here, not in ``_checkunknownfiles()``,
61 # because that function does other useful work.
61 # because that function does other useful work.
62 return False
62 return False
63
63
64 if f2 is None:
64 if f2 is None:
65 f2 = f
65 f2 = f
66 return (
66 return (
67 repo.wvfs.audit.check(f)
67 repo.wvfs.audit.check(f)
68 and repo.wvfs.isfileorlink(f)
68 and repo.wvfs.isfileorlink(f)
69 and repo.dirstate.normalize(f) not in repo.dirstate
69 and repo.dirstate.normalize(f) not in repo.dirstate
70 and mctx[f2].cmp(wctx[f])
70 and mctx[f2].cmp(wctx[f])
71 )
71 )
72
72
73
73
class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file.  If any path prefix of the file exists as a file or link,
    then it conflicts.  If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.

    Instances are stateful: results for directories already examined are
    cached across calls, so one checker should be reused for a whole batch
    of files from the same working directory.
    """

    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        # Returns the shortest conflicting path, or None/no value if there
        # is no conflict (callers test the result against None).
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        # finddirs yields ancestors longest-first, so reversed() walks from
        # the shortest prefix down, ensuring the shortest conflict is found.
        for p in reversed(list(pathutil.finddirs(f))):
            if p in self._missingdircache:
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (
                    repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate
                ):
                    return p
                if not repo.wvfs.lexists(p):
                    # prefix doesn't exist: no deeper prefix can either
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        return f
        return None
128
128
129
129
def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Mutates ``mresult`` in place (re-adding files with adjusted actions and
    backup flags) and raises error.Abort when the configured policy for a
    detected conflict is 'abort'.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
    pathconfig = repo.ui.configbool(
        b'experimental', b'merge.checkpathconflicts'
    )
    if not force:

        def collectconflicts(conflicts, config):
            # route each conflict set into abort/warn buckets per its policy
            if config == b'abort':
                abortconflicts.update(conflicts)
            elif config == b'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f in mresult.files(
            (
                mergestatemod.ACTION_CREATED,
                mergestatemod.ACTION_DELETED_CHANGED,
            )
        ):
            if _checkunknownfile(repo, wctx, mctx, f):
                fileconflicts.add(f)
            elif pathconfig and f not in wctx:
                path = checkunknowndirs(repo, wctx, f)
                if path is not None:
                    pathconflicts.add(path)
        for f, args, msg in mresult.getactions(
            [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
        ):
            # args[0] is the source file name on the remote side
            if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, args, msg in list(
            mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
        ):
            fl2, anc = args
            different = _checkunknownfile(repo, wctx, mctx, f)
            if repo.dirstate._ignore(f):
                config = ignoredconfig
            else:
                config = unknownconfig

            # The behavior when force is True is described by this table:
            #  config  different  mergeforce  |    action    backup
            #    *         n          *       |      get        n
            #    *         y          y       |     merge       -
            #   abort      y          n       |     merge       -   (1)
            #   warn       y          n       |  warn + get     y
            #  ignore      y          n       |      get        y
            #
            # (1) this is probably the wrong behavior here -- we should
            #     probably abort, but some actions like rebases currently
            #     don't like an abort happening in the middle of
            #     merge.update.
            if not different:
                mresult.addfile(
                    f,
                    mergestatemod.ACTION_GET,
                    (fl2, False),
                    b'remote created',
                )
            elif mergeforce or config == b'abort':
                mresult.addfile(
                    f,
                    mergestatemod.ACTION_MERGE,
                    (f, f, None, False, anc),
                    b'remote differs from untracked local',
                )
            elif config == b'abort':
                # NOTE(review): unreachable -- the previous branch already
                # matches config == b'abort' (see table note (1) above).
                # Left in place to avoid a behavior change.
                abortconflicts.add(f)
            else:
                if config == b'warn':
                    warnconflicts.add(f)
                mresult.addfile(
                    f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
                )

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_(b"%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_(b"%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_(b"%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(
            _(
                b"untracked files in working directory "
                b"differ from files in requested revision"
            )
        )

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)

    # Re-add every CREATED file as a GET, flagging it for backup when it (or
    # any of its parent directories) was found to conflict.
    for f, args, msg in list(
        mresult.getactions([mergestatemod.ACTION_CREATED])
    ):
        backup = (
            f in fileconflicts
            or f in pathconflicts
            or any(p in pathconflicts for p in pathutil.finddirs(f))
        )
        (flags,) = args
        mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
256
256
257
257
def _forgetremoved(wctx, mctx, branchmerge, mresult):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    # Deleted files: on a branch merge they become removals, otherwise
    # they are simply forgotten.
    if branchmerge:
        action = mergestatemod.ACTION_REMOVE
    else:
        action = mergestatemod.ACTION_FORGET
    for f in wctx.deleted():
        if f not in mctx:
            mresult.addfile(f, action, None, b"forget deleted")

    # Files already marked removed are only forgotten on non-merge updates.
    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                mresult.addfile(
                    f, mergestatemod.ACTION_FORGET, None, b"forget removed",
                )
286
286
287
287
def _checkcollision(repo, wmf, mresult):
    """
    Check for case-folding collisions.

    Builds a provisional post-merge manifest from the working manifest
    ``wmf`` and the pending actions in ``mresult``, then aborts if two
    entries (or an entry and another entry's directory) would collide on
    a case-insensitive filesystem.
    """
    # If the repo is narrowed, restrict the check (and the pending
    # actions) to files inside the narrowspec.
    narrowmatch = repo.narrowmatch()
    if narrowmatch.always():
        # build provisional merged manifest up
        provisional = set(wmf)
    else:
        provisional = set(wmf.walk(narrowmatch))
        if mresult:
            for fname in list(mresult.files()):
                if not narrowmatch(fname):
                    mresult.removefile(fname)

    if mresult:
        # KEEP and EXEC are no-op
        creating_actions = (
            mergestatemod.ACTION_ADD,
            mergestatemod.ACTION_ADD_MODIFIED,
            mergestatemod.ACTION_FORGET,
            mergestatemod.ACTION_GET,
            mergestatemod.ACTION_CHANGED_DELETED,
            mergestatemod.ACTION_DELETED_CHANGED,
        )
        provisional.update(mresult.files(creating_actions))
        for fname in mresult.files((mergestatemod.ACTION_REMOVE,)):
            provisional.discard(fname)
        for fname, args, msg in mresult.getactions(
            [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
        ):
            src, flags = args
            provisional.discard(src)
            provisional.add(fname)
        provisional.update(
            mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,))
        )
        for fname, args, msg in mresult.getactions(
            [mergestatemod.ACTION_MERGE]
        ):
            f1, f2, fa, move, anc = args
            if move:
                provisional.discard(f1)
            provisional.add(fname)

    # check case-folding collision in provisional merged manifest:
    # map folded name -> original name and abort on a duplicate fold
    folded = {}
    for fname in provisional:
        key = util.normcase(fname)
        if key in folded:
            raise error.Abort(
                _(b"case-folding collision between %s and %s")
                % (fname, folded[key])
            )
        folded[key] = fname

    # check case-folding of directories: walking the folds in sorted
    # order, a file whose folded name extends the previous entry's fold
    # but whose real name does not extend its real path collides with
    # that entry's directory
    foldprefix = unfoldprefix = lastfull = b''
    for key, fname in sorted(folded.items()):
        if key.startswith(foldprefix) and not fname.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(
                _(b"case-folding collision between %s and directory of %s")
                % (lastfull, fname)
            )
        foldprefix = key + b'/'
        unfoldprefix = fname + b'/'
        lastfull = fname
356
356
357
357
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # no driver support in core; extensions wrap this to do real work
    return True
363
363
364
364
def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # no driver support in core; extensions wrap this to do real work
    return True
370
370
371
371
def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    # ``repo`` is unused but kept for interface compatibility
    for fname in manifest:
        # report only the first (innermost-to-outermost per finddirs
        # order) matching parent directory for each file
        parent = next(
            (d for d in pathutil.finddirs(fname) if d in dirs), None
        )
        if parent is not None:
            yield fname, parent
383
383
384
384
def checkpathconflicts(repo, wctx, mctx, mresult):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.

    repo: the local repository (used for ui warnings and _filesindirs)
    wctx: the working context (local side of the merge)
    mctx: the context being merged in (remote side)
    mresult: a mergeresult object; its per-file actions are read and
             rewritten in place to resolve or record path conflicts
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    # Pass 1: collect every directory that will contain a newly created
    # file, and note files that would alias an existing local directory.
    for f in mresult.files(
        (
            mergestatemod.ACTION_CREATED,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_MERGE,
            mergestatemod.ACTION_CREATED_MERGE,
        )
    ):
        # This action may create a new local file.
        createdfiledirs.update(pathutil.finddirs(f))
        if mf.hasdir(f):
            # The file aliases a local directory. This might be ok if all
            # the files in the local directory are being deleted. This
            # will be checked once we know what all the deleted files are.
            remoteconflicts.add(f)
    # Track the names of all deleted files.
    for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
        deletedfiles.add(f)
    # A merge that moves its local source also effectively deletes it.
    for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
        f1, f2, fa, move, anc = args
        if move:
            deletedfiles.add(f1)
    # Directory-rename moves delete their source path as well.
    for (f, args, msg) in mresult.getactions(
        (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
    ):
        f2, flags = args
        deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file. This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        # NOTE: this lookup runs for every created-file directory, not
        # only those present in the local manifest.
        pd = mresult.getfile(p)
        if pd and pd[0] in (
            mergestatemod.ACTION_CREATED,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_MERGE,
            mergestatemod.ACTION_CREATED_MERGE,
        ):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip(b'+')
            pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
            porig = wctx[p].copysource() or p
            mresult.addfile(
                pnew,
                mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
                (p, porig),
                b'local path conflict',
            )
            mresult.addfile(
                p,
                mergestatemod.ACTION_PATH_CONFLICT,
                (pnew, b'l'),
                b'path conflict',
            )

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip(b'+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = mresult.getfile(p)
                pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
                if m in (
                    mergestatemod.ACTION_DELETED_CHANGED,
                    mergestatemod.ACTION_MERGE,
                ):
                    # Action was merge, just update target.
                    mresult.addfile(pnew, m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    mresult.addfile(
                        pnew,
                        mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
                        (p, fl),
                        b'remote path conflict',
                    )
                mresult.addfile(
                    p,
                    mergestatemod.ACTION_PATH_CONFLICT,
                    (pnew, mergestatemod.ACTION_REMOVE),
                    b'path conflict',
                )
                remoteconflicts.remove(p)
                # NOTE(review): this breaks after the first conflicting
                # path with a surviving file — presumably to avoid
                # further bookkeeping on the mutated set; confirm that
                # remaining remote conflicts are handled elsewhere.
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
        raise error.Abort(_(b"destination manifest contains path conflicts"))
516
516
517
517
def _filternarrowactions(narrowmatch, branchmerge, mresult):
    """
    Filters out actions that can ignored because the repo is narrowed.

    Raise an exception if the merge cannot be completed because the repo is
    narrowed.
    """
    # TODO: handle with nonconflicttypes
    noop_actions = {mergestatemod.ACTION_KEEP}
    nonconflict_actions = {
        mergestatemod.ACTION_ADD,
        mergestatemod.ACTION_ADD_MODIFIED,
        mergestatemod.ACTION_CREATED,
        mergestatemod.ACTION_CREATED_MERGE,
        mergestatemod.ACTION_FORGET,
        mergestatemod.ACTION_GET,
        mergestatemod.ACTION_REMOVE,
        mergestatemod.ACTION_EXEC,
    }
    # We mutate the items in the dict during iteration, so iterate
    # over a copy (filemap() snapshots the mapping).
    for fname, entry in mresult.filemap():
        if narrowmatch(fname):
            continue
        if not branchmerge:
            # just updating, ignore changes outside clone
            mresult.removefile(fname)
        elif entry[0] in noop_actions:
            # merge does not affect file
            mresult.removefile(fname)
        elif entry[0] in nonconflict_actions:
            raise error.Abort(
                _(
                    b'merge affects file \'%s\' outside narrow, '
                    b'which is not yet supported'
                )
                % fname,
                hint=_(b'merging in the other direction may work'),
            )
        else:
            raise error.Abort(
                _(b'conflict in file \'%s\' is outside narrow clone') % fname
            )
559
559
560
560
class mergeresult(object):
    '''An object representing result of merging manifests.

    It has information about what actions need to be performed on dirstate
    mapping of divergent renames and other such cases.'''

    def __init__(self):
        """
        filemapping: dict of filename as keys and action related info as values
        diverge: mapping of source name -> list of dest name for
                 divergent renames
        renamedelete: mapping of source name -> list of destinations for files
                      deleted on one side and renamed on other.
        commitinfo: dict containing data which should be used on commit
                    contains a filename -> info mapping
        actionmapping: dict of action names as keys and values are dict of
                       filename as key and related data as values
        """
        self._filemapping = {}
        self._diverge = {}
        self._renamedelete = {}
        self._commitinfo = {}
        self._actionmapping = collections.defaultdict(dict)

    def updatevalues(self, diverge, renamedelete, commitinfo):
        """replace the diverge, renamedelete and commitinfo mappings"""
        self._diverge = diverge
        self._renamedelete = renamedelete
        self._commitinfo = commitinfo

    def addfile(self, filename, action, data, message):
        """ adds a new file to the mergeresult object

        filename: file which we are adding
        action: one of mergestatemod.ACTION_*
        data: a tuple of information like fctx and ctx related to this merge
        message: a message about the merge
        """
        # if the file already existed, we need to delete its old
        # entry from _actionmapping too
        if filename in self._filemapping:
            a, d, m = self._filemapping[filename]
            del self._actionmapping[a][filename]

        self._filemapping[filename] = (action, data, message)
        self._actionmapping[action][filename] = (data, message)

    def getfile(self, filename, default_return=None):
        """ returns (action, args, msg) about this file

        returns default_return if the file is not present """
        if filename in self._filemapping:
            return self._filemapping[filename]
        return default_return

    def files(self, actions=None):
        """ returns files on which provided action needs to be performed

        If actions is None, all files are returned
        """
        # TODO: think whether we should return renamedelete and
        # diverge filenames also
        if actions is None:
            for f in self._filemapping:
                yield f

        else:
            for a in actions:
                for f in self._actionmapping[a]:
                    yield f

    def removefile(self, filename):
        """ removes a file from the mergeresult object as the file might
        not merging anymore """
        # keep the two indexes (_filemapping and _actionmapping) in sync
        action, data, message = self._filemapping[filename]
        del self._filemapping[filename]
        del self._actionmapping[action][filename]

    def getactions(self, actions, sort=False):
        """ get list of files which are marked with these actions
        if sort is true, files for each action is sorted and then added

        Returns a list of tuple of form (filename, data, message)
        """
        for a in actions:
            if sort:
                for f in sorted(self._actionmapping[a]):
                    args, msg = self._actionmapping[a][f]
                    yield f, args, msg
            else:
                for f, (args, msg) in pycompat.iteritems(
                    self._actionmapping[a]
                ):
                    yield f, args, msg

    def len(self, actions=None):
        """ returns number of files which needs actions

        if actions is passed, total of number of files in that action
        only is returned """

        if actions is None:
            return len(self._filemapping)

        return sum(len(self._actionmapping[a]) for a in actions)

    def filemap(self, sort=False):
        """ yields (filename, (action, data, message)) for all files

        If sort is true, filenames are yielded in sorted order.  The
        mapping is snapshotted into a list first, so callers (e.g.
        _filternarrowactions) may add or remove files while iterating.
        """
        # BUG FIX: this previously tested ``if sorted:`` -- the builtin
        # function, which is always truthy -- so the ``sort`` parameter
        # was silently ignored and output was always sorted.  We now
        # honor ``sort``; the unsorted path copies into a list so that
        # mutation during iteration stays safe.
        if sort:
            items = sorted(pycompat.iteritems(self._filemapping))
        else:
            items = list(pycompat.iteritems(self._filemapping))
        for key, val in items:
            yield key, val

    @property
    def diverge(self):
        # mapping of source name -> list of dest names for divergent renames
        return self._diverge

    @property
    def renamedelete(self):
        # mapping of source name -> list of destinations for files deleted
        # on one side and renamed on the other
        return self._renamedelete

    @property
    def commitinfo(self):
        # filename -> info mapping to be used at commit time
        return self._commitinfo

    @property
    def actionsdict(self):
        """ returns a dictionary of actions to be performed with action as key
        and a list of files and related arguments as values """
        res = collections.defaultdict(list)
        for a, d in pycompat.iteritems(self._actionmapping):
            for f, (args, msg) in pycompat.iteritems(d):
                res[a].append((f, args, msg))
        return res

    def setactions(self, actions):
        """replace all recorded actions with the given filename -> (action,
        data, message) mapping, rebuilding the per-action index"""
        self._filemapping = actions
        self._actionmapping = collections.defaultdict(dict)
        for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
            self._actionmapping[act][f] = data, msg

    def hasconflicts(self):
        """ tells whether this merge resulted in some actions which can
        result in conflicts or not """
        # any non-empty action bucket outside the known conflict-free set
        # means the merge may conflict
        for a in self._actionmapping.keys():
            if (
                a
                not in (
                    mergestatemod.ACTION_GET,
                    mergestatemod.ACTION_KEEP,
                    mergestatemod.ACTION_EXEC,
                    mergestatemod.ACTION_REMOVE,
                    mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
                )
                and self._actionmapping[a]
            ):
                return True

        return False
720
720
721
721
722 def manifestmerge(
722 def manifestmerge(
723 repo,
723 repo,
724 wctx,
724 wctx,
725 p2,
725 p2,
726 pa,
726 pa,
727 branchmerge,
727 branchmerge,
728 force,
728 force,
729 matcher,
729 matcher,
730 acceptremote,
730 acceptremote,
731 followcopies,
731 followcopies,
732 forcefulldiff=False,
732 forcefulldiff=False,
733 ):
733 ):
734 """
734 """
735 Merge wctx and p2 with ancestor pa and generate merge action list
735 Merge wctx and p2 with ancestor pa and generate merge action list
736
736
737 branchmerge and force are as passed in to update
737 branchmerge and force are as passed in to update
738 matcher = matcher to filter file lists
738 matcher = matcher to filter file lists
739 acceptremote = accept the incoming changes without prompting
739 acceptremote = accept the incoming changes without prompting
740
740
741 Returns an object of mergeresult class
741 Returns an object of mergeresult class
742 """
742 """
743 mresult = mergeresult()
743 mresult = mergeresult()
744 if matcher is not None and matcher.always():
744 if matcher is not None and matcher.always():
745 matcher = None
745 matcher = None
746
746
747 # manifests fetched in order are going to be faster, so prime the caches
747 # manifests fetched in order are going to be faster, so prime the caches
748 [
748 [
749 x.manifest()
749 x.manifest()
750 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
750 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
751 ]
751 ]
752
752
753 branch_copies1 = copies.branch_copies()
753 branch_copies1 = copies.branch_copies()
754 branch_copies2 = copies.branch_copies()
754 branch_copies2 = copies.branch_copies()
755 diverge = {}
755 diverge = {}
756 # information from merge which is needed at commit time
756 # information from merge which is needed at commit time
757 # for example choosing filelog of which parent to commit
757 # for example choosing filelog of which parent to commit
758 # TODO: use specific constants in future for this mapping
758 # TODO: use specific constants in future for this mapping
759 commitinfo = {}
759 commitinfo = {}
760 if followcopies:
760 if followcopies:
761 branch_copies1, branch_copies2, diverge = copies.mergecopies(
761 branch_copies1, branch_copies2, diverge = copies.mergecopies(
762 repo, wctx, p2, pa
762 repo, wctx, p2, pa
763 )
763 )
764
764
765 boolbm = pycompat.bytestr(bool(branchmerge))
765 boolbm = pycompat.bytestr(bool(branchmerge))
766 boolf = pycompat.bytestr(bool(force))
766 boolf = pycompat.bytestr(bool(force))
767 boolm = pycompat.bytestr(bool(matcher))
767 boolm = pycompat.bytestr(bool(matcher))
768 repo.ui.note(_(b"resolving manifests\n"))
768 repo.ui.note(_(b"resolving manifests\n"))
769 repo.ui.debug(
769 repo.ui.debug(
770 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
770 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
771 )
771 )
772 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
772 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
773
773
774 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
774 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
775 copied1 = set(branch_copies1.copy.values())
775 copied1 = set(branch_copies1.copy.values())
776 copied1.update(branch_copies1.movewithdir.values())
776 copied1.update(branch_copies1.movewithdir.values())
777 copied2 = set(branch_copies2.copy.values())
777 copied2 = set(branch_copies2.copy.values())
778 copied2.update(branch_copies2.movewithdir.values())
778 copied2.update(branch_copies2.movewithdir.values())
779
779
780 if b'.hgsubstate' in m1 and wctx.rev() is None:
780 if b'.hgsubstate' in m1 and wctx.rev() is None:
781 # Check whether sub state is modified, and overwrite the manifest
781 # Check whether sub state is modified, and overwrite the manifest
782 # to flag the change. If wctx is a committed revision, we shouldn't
782 # to flag the change. If wctx is a committed revision, we shouldn't
783 # care for the dirty state of the working directory.
783 # care for the dirty state of the working directory.
784 if any(wctx.sub(s).dirty() for s in wctx.substate):
784 if any(wctx.sub(s).dirty() for s in wctx.substate):
785 m1[b'.hgsubstate'] = modifiednodeid
785 m1[b'.hgsubstate'] = modifiednodeid
786
786
787 # Don't use m2-vs-ma optimization if:
787 # Don't use m2-vs-ma optimization if:
788 # - ma is the same as m1 or m2, which we're just going to diff again later
788 # - ma is the same as m1 or m2, which we're just going to diff again later
789 # - The caller specifically asks for a full diff, which is useful during bid
789 # - The caller specifically asks for a full diff, which is useful during bid
790 # merge.
790 # merge.
791 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
791 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
792 # Identify which files are relevant to the merge, so we can limit the
792 # Identify which files are relevant to the merge, so we can limit the
793 # total m1-vs-m2 diff to just those files. This has significant
793 # total m1-vs-m2 diff to just those files. This has significant
794 # performance benefits in large repositories.
794 # performance benefits in large repositories.
795 relevantfiles = set(ma.diff(m2).keys())
795 relevantfiles = set(ma.diff(m2).keys())
796
796
797 # For copied and moved files, we need to add the source file too.
797 # For copied and moved files, we need to add the source file too.
798 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
798 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
799 if copyvalue in relevantfiles:
799 if copyvalue in relevantfiles:
800 relevantfiles.add(copykey)
800 relevantfiles.add(copykey)
801 for movedirkey in branch_copies1.movewithdir:
801 for movedirkey in branch_copies1.movewithdir:
802 relevantfiles.add(movedirkey)
802 relevantfiles.add(movedirkey)
803 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
803 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
804 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
804 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
805
805
806 diff = m1.diff(m2, match=matcher)
806 diff = m1.diff(m2, match=matcher)
807
807
808 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
808 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
809 if n1 and n2: # file exists on both local and remote side
809 if n1 and n2: # file exists on both local and remote side
810 if f not in ma:
810 if f not in ma:
811 # TODO: what if they're renamed from different sources?
811 # TODO: what if they're renamed from different sources?
812 fa = branch_copies1.copy.get(
812 fa = branch_copies1.copy.get(
813 f, None
813 f, None
814 ) or branch_copies2.copy.get(f, None)
814 ) or branch_copies2.copy.get(f, None)
815 args, msg = None, None
815 args, msg = None, None
816 if fa is not None:
816 if fa is not None:
817 args = (f, f, fa, False, pa.node())
817 args = (f, f, fa, False, pa.node())
818 msg = b'both renamed from %s' % fa
818 msg = b'both renamed from %s' % fa
819 else:
819 else:
820 args = (f, f, None, False, pa.node())
820 args = (f, f, None, False, pa.node())
821 msg = b'both created'
821 msg = b'both created'
822 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
822 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
823 else:
823 else:
824 a = ma[f]
824 a = ma[f]
825 fla = ma.flags(f)
825 fla = ma.flags(f)
826 nol = b'l' not in fl1 + fl2 + fla
826 nol = b'l' not in fl1 + fl2 + fla
827 if n2 == a and fl2 == fla:
827 if n2 == a and fl2 == fla:
828 mresult.addfile(
828 mresult.addfile(
829 f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
829 f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
830 )
830 )
831 elif n1 == a and fl1 == fla: # local unchanged - use remote
831 elif n1 == a and fl1 == fla: # local unchanged - use remote
832 if n1 == n2: # optimization: keep local content
832 if n1 == n2: # optimization: keep local content
833 mresult.addfile(
833 mresult.addfile(
834 f,
834 f,
835 mergestatemod.ACTION_EXEC,
835 mergestatemod.ACTION_EXEC,
836 (fl2,),
836 (fl2,),
837 b'update permissions',
837 b'update permissions',
838 )
838 )
839 else:
839 else:
840 mresult.addfile(
840 mresult.addfile(
841 f,
841 f,
842 mergestatemod.ACTION_GET,
842 mergestatemod.ACTION_GET,
843 (fl2, False),
843 (fl2, False),
844 b'remote is newer',
844 b'remote is newer',
845 )
845 )
846 if branchmerge:
846 if branchmerge:
847 commitinfo[f] = b'other'
847 commitinfo[f] = b'other'
848 elif nol and n2 == a: # remote only changed 'x'
848 elif nol and n2 == a: # remote only changed 'x'
849 mresult.addfile(
849 mresult.addfile(
850 f,
850 f,
851 mergestatemod.ACTION_EXEC,
851 mergestatemod.ACTION_EXEC,
852 (fl2,),
852 (fl2,),
853 b'update permissions',
853 b'update permissions',
854 )
854 )
855 elif nol and n1 == a: # local only changed 'x'
855 elif nol and n1 == a: # local only changed 'x'
856 mresult.addfile(
856 mresult.addfile(
857 f,
857 f,
858 mergestatemod.ACTION_GET,
858 mergestatemod.ACTION_GET,
859 (fl1, False),
859 (fl1, False),
860 b'remote is newer',
860 b'remote is newer',
861 )
861 )
862 if branchmerge:
862 if branchmerge:
863 commitinfo[f] = b'other'
863 commitinfo[f] = b'other'
864 else: # both changed something
864 else: # both changed something
865 mresult.addfile(
865 mresult.addfile(
866 f,
866 f,
867 mergestatemod.ACTION_MERGE,
867 mergestatemod.ACTION_MERGE,
868 (f, f, f, False, pa.node()),
868 (f, f, f, False, pa.node()),
869 b'versions differ',
869 b'versions differ',
870 )
870 )
871 elif n1: # file exists only on local side
871 elif n1: # file exists only on local side
872 if f in copied2:
872 if f in copied2:
873 pass # we'll deal with it on m2 side
873 pass # we'll deal with it on m2 side
874 elif (
874 elif (
875 f in branch_copies1.movewithdir
875 f in branch_copies1.movewithdir
876 ): # directory rename, move local
876 ): # directory rename, move local
877 f2 = branch_copies1.movewithdir[f]
877 f2 = branch_copies1.movewithdir[f]
878 if f2 in m2:
878 if f2 in m2:
879 mresult.addfile(
879 mresult.addfile(
880 f2,
880 f2,
881 mergestatemod.ACTION_MERGE,
881 mergestatemod.ACTION_MERGE,
882 (f, f2, None, True, pa.node()),
882 (f, f2, None, True, pa.node()),
883 b'remote directory rename, both created',
883 b'remote directory rename, both created',
884 )
884 )
885 else:
885 else:
886 mresult.addfile(
886 mresult.addfile(
887 f2,
887 f2,
888 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
888 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
889 (f, fl1),
889 (f, fl1),
890 b'remote directory rename - move from %s' % f,
890 b'remote directory rename - move from %s' % f,
891 )
891 )
892 elif f in branch_copies1.copy:
892 elif f in branch_copies1.copy:
893 f2 = branch_copies1.copy[f]
893 f2 = branch_copies1.copy[f]
894 mresult.addfile(
894 mresult.addfile(
895 f,
895 f,
896 mergestatemod.ACTION_MERGE,
896 mergestatemod.ACTION_MERGE,
897 (f, f2, f2, False, pa.node()),
897 (f, f2, f2, False, pa.node()),
898 b'local copied/moved from %s' % f2,
898 b'local copied/moved from %s' % f2,
899 )
899 )
900 elif f in ma: # clean, a different, no remote
900 elif f in ma: # clean, a different, no remote
901 if n1 != ma[f]:
901 if n1 != ma[f]:
902 if acceptremote:
902 if acceptremote:
903 mresult.addfile(
903 mresult.addfile(
904 f,
904 f,
905 mergestatemod.ACTION_REMOVE,
905 mergestatemod.ACTION_REMOVE,
906 None,
906 None,
907 b'remote delete',
907 b'remote delete',
908 )
908 )
909 else:
909 else:
910 mresult.addfile(
910 mresult.addfile(
911 f,
911 f,
912 mergestatemod.ACTION_CHANGED_DELETED,
912 mergestatemod.ACTION_CHANGED_DELETED,
913 (f, None, f, False, pa.node()),
913 (f, None, f, False, pa.node()),
914 b'prompt changed/deleted',
914 b'prompt changed/deleted',
915 )
915 )
916 elif n1 == addednodeid:
916 elif n1 == addednodeid:
917 # This file was locally added. We should forget it instead of
917 # This file was locally added. We should forget it instead of
918 # deleting it.
918 # deleting it.
919 mresult.addfile(
919 mresult.addfile(
920 f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
920 f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
921 )
921 )
922 else:
922 else:
923 mresult.addfile(
923 mresult.addfile(
924 f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
924 f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
925 )
925 )
926 elif n2: # file exists only on remote side
926 elif n2: # file exists only on remote side
927 if f in copied1:
927 if f in copied1:
928 pass # we'll deal with it on m1 side
928 pass # we'll deal with it on m1 side
929 elif f in branch_copies2.movewithdir:
929 elif f in branch_copies2.movewithdir:
930 f2 = branch_copies2.movewithdir[f]
930 f2 = branch_copies2.movewithdir[f]
931 if f2 in m1:
931 if f2 in m1:
932 mresult.addfile(
932 mresult.addfile(
933 f2,
933 f2,
934 mergestatemod.ACTION_MERGE,
934 mergestatemod.ACTION_MERGE,
935 (f2, f, None, False, pa.node()),
935 (f2, f, None, False, pa.node()),
936 b'local directory rename, both created',
936 b'local directory rename, both created',
937 )
937 )
938 else:
938 else:
939 mresult.addfile(
939 mresult.addfile(
940 f2,
940 f2,
941 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
941 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
942 (f, fl2),
942 (f, fl2),
943 b'local directory rename - get from %s' % f,
943 b'local directory rename - get from %s' % f,
944 )
944 )
945 elif f in branch_copies2.copy:
945 elif f in branch_copies2.copy:
946 f2 = branch_copies2.copy[f]
946 f2 = branch_copies2.copy[f]
947 msg, args = None, None
947 msg, args = None, None
948 if f2 in m2:
948 if f2 in m2:
949 args = (f2, f, f2, False, pa.node())
949 args = (f2, f, f2, False, pa.node())
950 msg = b'remote copied from %s' % f2
950 msg = b'remote copied from %s' % f2
951 else:
951 else:
952 args = (f2, f, f2, True, pa.node())
952 args = (f2, f, f2, True, pa.node())
953 msg = b'remote moved from %s' % f2
953 msg = b'remote moved from %s' % f2
954 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
954 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
955 elif f not in ma:
955 elif f not in ma:
956 # local unknown, remote created: the logic is described by the
956 # local unknown, remote created: the logic is described by the
957 # following table:
957 # following table:
958 #
958 #
959 # force branchmerge different | action
959 # force branchmerge different | action
960 # n * * | create
960 # n * * | create
961 # y n * | create
961 # y n * | create
962 # y y n | create
962 # y y n | create
963 # y y y | merge
963 # y y y | merge
964 #
964 #
965 # Checking whether the files are different is expensive, so we
965 # Checking whether the files are different is expensive, so we
966 # don't do that when we can avoid it.
966 # don't do that when we can avoid it.
967 if not force:
967 if not force:
968 mresult.addfile(
968 mresult.addfile(
969 f,
969 f,
970 mergestatemod.ACTION_CREATED,
970 mergestatemod.ACTION_CREATED,
971 (fl2,),
971 (fl2,),
972 b'remote created',
972 b'remote created',
973 )
973 )
974 elif not branchmerge:
974 elif not branchmerge:
975 mresult.addfile(
975 mresult.addfile(
976 f,
976 f,
977 mergestatemod.ACTION_CREATED,
977 mergestatemod.ACTION_CREATED,
978 (fl2,),
978 (fl2,),
979 b'remote created',
979 b'remote created',
980 )
980 )
981 else:
981 else:
982 mresult.addfile(
982 mresult.addfile(
983 f,
983 f,
984 mergestatemod.ACTION_CREATED_MERGE,
984 mergestatemod.ACTION_CREATED_MERGE,
985 (fl2, pa.node()),
985 (fl2, pa.node()),
986 b'remote created, get or merge',
986 b'remote created, get or merge',
987 )
987 )
988 elif n2 != ma[f]:
988 elif n2 != ma[f]:
989 df = None
989 df = None
990 for d in branch_copies1.dirmove:
990 for d in branch_copies1.dirmove:
991 if f.startswith(d):
991 if f.startswith(d):
992 # new file added in a directory that was moved
992 # new file added in a directory that was moved
993 df = branch_copies1.dirmove[d] + f[len(d) :]
993 df = branch_copies1.dirmove[d] + f[len(d) :]
994 break
994 break
995 if df is not None and df in m1:
995 if df is not None and df in m1:
996 mresult.addfile(
996 mresult.addfile(
997 df,
997 df,
998 mergestatemod.ACTION_MERGE,
998 mergestatemod.ACTION_MERGE,
999 (df, f, f, False, pa.node()),
999 (df, f, f, False, pa.node()),
1000 b'local directory rename - respect move '
1000 b'local directory rename - respect move '
1001 b'from %s' % f,
1001 b'from %s' % f,
1002 )
1002 )
1003 elif acceptremote:
1003 elif acceptremote:
1004 mresult.addfile(
1004 mresult.addfile(
1005 f,
1005 f,
1006 mergestatemod.ACTION_CREATED,
1006 mergestatemod.ACTION_CREATED,
1007 (fl2,),
1007 (fl2,),
1008 b'remote recreating',
1008 b'remote recreating',
1009 )
1009 )
1010 else:
1010 else:
1011 mresult.addfile(
1011 mresult.addfile(
1012 f,
1012 f,
1013 mergestatemod.ACTION_DELETED_CHANGED,
1013 mergestatemod.ACTION_DELETED_CHANGED,
1014 (None, f, f, False, pa.node()),
1014 (None, f, f, False, pa.node()),
1015 b'prompt deleted/changed',
1015 b'prompt deleted/changed',
1016 )
1016 )
1017
1017
1018 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1018 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1019 # If we are merging, look for path conflicts.
1019 # If we are merging, look for path conflicts.
1020 checkpathconflicts(repo, wctx, p2, mresult)
1020 checkpathconflicts(repo, wctx, p2, mresult)
1021
1021
1022 narrowmatch = repo.narrowmatch()
1022 narrowmatch = repo.narrowmatch()
1023 if not narrowmatch.always():
1023 if not narrowmatch.always():
1024 # Updates "actions" in place
1024 # Updates "actions" in place
1025 _filternarrowactions(narrowmatch, branchmerge, mresult)
1025 _filternarrowactions(narrowmatch, branchmerge, mresult)
1026
1026
1027 renamedelete = branch_copies1.renamedelete
1027 renamedelete = branch_copies1.renamedelete
1028 renamedelete.update(branch_copies2.renamedelete)
1028 renamedelete.update(branch_copies2.renamedelete)
1029
1029
1030 mresult.updatevalues(diverge, renamedelete, commitinfo)
1030 mresult.updatevalues(diverge, renamedelete, commitinfo)
1031 return mresult
1031 return mresult
1032
1032
1033
1033
def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""
    # Snapshot the file lists up front: addfile()/removefile() mutate
    # mresult while we are resolving, so we must not iterate it live.
    for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))):
        if f not in ancestor:
            continue
        if wctx[f].cmp(ancestor[f]):
            continue
        # local did change but ended up with same content
        mresult.addfile(f, mergestatemod.ACTION_REMOVE, None, b'prompt same')

    for f in list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,))):
        if f not in ancestor:
            continue
        if mctx[f].cmp(ancestor[f]):
            continue
        # remote did change but ended up with same content
        mresult.removefile(f)  # don't get = keep local deleted
1050
1050
1051
1051
def calculateupdates(
    repo,
    wctx,
    mctx,
    ancestors,
    branchmerge,
    force,
    acceptremote,
    followcopies,
    matcher=None,
    mergeforce=False,
):
    """
    Calculate the actions needed to merge mctx into wctx using ancestors

    Uses manifestmerge() to merge manifest and get list of actions required to
    perform for merging two manifests. If there are multiple ancestors, uses bid
    merge if enabled.

    Also filters out actions which are unrequired if repository is sparse.

    Returns mergeresult object same as manifestmerge().
    """
    # Avoid cycle.
    from . import sparse

    mresult = None
    if len(ancestors) == 1:  # default
        mresult = manifestmerge(
            repo,
            wctx,
            mctx,
            ancestors[0],
            branchmerge,
            force,
            matcher,
            acceptremote,
            followcopies,
        )
        _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)

    else:  # only when merge.preferancestor=* - the default
        repo.ui.note(
            _(b"note: merging %s and %s using bids from ancestors %s\n")
            % (
                wctx,
                mctx,
                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
            )
        )

        # mapping filename to bids (action method to list af actions)
        # {FILENAME1 : BID1, FILENAME2 : BID2}
        # BID is another dictionary which contains
        # mapping of following form:
        # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
        fbids = {}
        diverge, renamedelete = None, None
        # Run one full manifestmerge per ancestor and collect each file's
        # proposed action as a "bid"; the auction below picks a winner.
        for ancestor in ancestors:
            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
            mresult1 = manifestmerge(
                repo,
                wctx,
                mctx,
                ancestor,
                branchmerge,
                force,
                matcher,
                acceptremote,
                followcopies,
                forcefulldiff=True,
            )
            _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(mresult1.diverge) < len(diverge):
                diverge = mresult1.diverge
            if renamedelete is None or len(renamedelete) < len(
                mresult1.renamedelete
            ):
                renamedelete = mresult1.renamedelete

            for f, a in mresult1.filemap(sort=True):
                m, args, msg = a
                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Call for bids
        # Pick the best bid for each file
        repo.ui.note(_(b'\nauction for merging merge bids\n'))
        mresult = mergeresult()
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1:  # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                    repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
                    mresult.addfile(f, *l[0])
                    continue
            # If keep is an option, just do it.
            if mergestatemod.ACTION_KEEP in bids:
                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if mergestatemod.ACTION_GET in bids:
                ga0 = bids[mergestatemod.ACTION_GET][0]
                if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                    mresult.addfile(f, *ga0)
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(b' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(
                _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
            )
            mresult.addfile(f, *l[0])
            continue
        repo.ui.note(_(b'end of auction\n\n'))
        # TODO: think about commitinfo when bid merge is used
        mresult.updatevalues(diverge, renamedelete, {})

    if wctx.rev() is None:
        # Working-directory merge: drop actions for files the other side
        # removed so we don't resurrect them.
        _forgetremoved(wctx, mctx, branchmerge, mresult)

    sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
    # NOTE: trivial-conflict resolution always uses the first ancestor,
    # even in the multi-ancestor (bid merge) case.
    _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)

    return mresult
1196
1196
1197
1197
def _getcwd():
    """Return the current working directory, or None if it was removed."""
    try:
        return encoding.getcwd()
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        # The cwd was deleted out from under us; report that as None so
        # callers can warn instead of crashing.
        return None
1205
1205
1206
1206
def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    ``actions`` is an iterable of ``(filename, args, msg)`` tuples.

    yields tuples ``(count, lastfile)`` for progress updates, roughly once
    per 100 files removed.
    """
    verbose = repo.ui.verbose
    # Remember the cwd so we can detect below whether the removal loop
    # deleted the directory we are running from.
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_(b"removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            # Best-effort: warn and keep going rather than aborting the
            # whole update on a single failed unlink.
            repo.ui.warn(
                _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
            )
        # Emit a progress tuple every ~100 files; the final partial batch
        # is flushed after the loop.
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(
            _(
                b"current directory was removed\n"
                b"(consider changing to repo root: %s)\n"
            )
            % repo.root
        )
1243
1243
1244
1244
def batchget(repo, mctx, wctx, wantfiledata, actions):
    """apply gets to the working directory

    mctx is the context to get from

    ``actions`` is an iterable of ``(filename, (flags, backup), msg)``
    tuples.

    Yields arbitrarily many (False, tuple) for progress updates, followed by
    exactly one (True, filedata). When wantfiledata is false, filedata is an
    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
    mtime) of the file f written for each action.
    """
    filedata = {}
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    # backgroundclosing lets file handles be closed off the main thread,
    # which speeds up writing many files.
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_(b"getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                conflicting = f
                if not repo.wvfs.lexists(f):
                    for p in pathutil.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            conflicting = p
                            break
                if repo.wvfs.lexists(conflicting):
                    orig = scmutil.backuppath(ui, repo, conflicting)
                    util.rename(repo.wjoin(conflicting), orig)
            wfctx = wctx[f]
            # Forget any cached "unknown file" state before overwriting.
            wfctx.clearunknown()
            atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
            size = wfctx.write(
                fctx(f).data(),
                flags,
                backgroundclose=True,
                atomictemp=atomictemp,
            )
            if wantfiledata:
                s = wfctx.lstat()
                mode = s.st_mode
                mtime = s[stat.ST_MTIME]
                filedata[f] = (mode, size, mtime)  # for dirstate.normal
            # Emit a progress tuple every ~100 files; the final partial
            # batch is flushed after the loop.
            if i == 100:
                yield False, (i, f)
                i = 0
            i += 1
    if i > 0:
        yield False, (i, f)
    # Terminal item: signals completion and carries the collected stat data.
    yield True, filedata
1300
1300
1301
1301
def _prefetchfiles(repo, ctx, mresult):
    """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
    of merge actions. ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    relevant_actions = [
        mergestatemod.ACTION_GET,
        mergestatemod.ACTION_DELETED_CHANGED,
        mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
        mergestatemod.ACTION_MERGE,
    ]
    files = mresult.files(relevant_actions)

    matcher = scmutil.matchfiles(repo, files)
    scmutil.prefetchfiles(repo, [(ctx.rev(), matcher,)])
1323
1323
1324
1324
@attr.s(frozen=True)
class updateresult(object):
    """Summary counts produced by applying an update/merge."""

    updatedcount = attr.ib()
    mergedcount = attr.ib()
    removedcount = attr.ib()
    unresolvedcount = attr.ib()

    def isempty(self):
        """True if the update touched nothing and left no conflicts."""
        counts = (
            self.updatedcount,
            self.mergedcount,
            self.removedcount,
            self.unresolvedcount,
        )
        return not any(counts)
1339
1339
1340
1340
def emptyactions():
    """create an actions dict, to be populated and passed to applyupdates()"""
    all_actions = (
        mergestatemod.ACTION_ADD,
        mergestatemod.ACTION_ADD_MODIFIED,
        mergestatemod.ACTION_FORGET,
        mergestatemod.ACTION_GET,
        mergestatemod.ACTION_CHANGED_DELETED,
        mergestatemod.ACTION_DELETED_CHANGED,
        mergestatemod.ACTION_REMOVE,
        mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
        mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
        mergestatemod.ACTION_MERGE,
        mergestatemod.ACTION_EXEC,
        mergestatemod.ACTION_KEEP,
        mergestatemod.ACTION_PATH_CONFLICT,
        mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
    )
    # Each action key maps to its own fresh list (never a shared one).
    actions = {}
    for action in all_actions:
        actions[action] = []
    return actions
1362
1363
def applyupdates(
    repo,
    mresult,
    wctx,
    mctx,
    overwrite,
    wantfiledata,
    labels=None,
    commitinfo=None,
):
    """apply the merge action list to the working directory

    mresult is a mergeresult object representing result of the merge
    wctx is the working copy context
    mctx is the context to be merged into the working copy
    labels are the conflict-marker labels, forwarded to the merge state
    and to subrepo merges
    commitinfo is a mapping of information which needs to be stored somewhere
    (probably mergestate) so that it can be used at commit time.

    Return a tuple of (counts, filedata), where counts is a tuple
    (updated, merged, removed, unresolved) that describes how many
    files were affected by the update, and filedata is as described in
    batchget.
    """

    _prefetchfiles(repo, mctx, mresult)

    updated, merged, removed = 0, 0, 0
    # start from a clean merge state for this pair of parents
    ms = mergestatemod.mergestate.clean(
        repo, wctx.p1().node(), mctx.node(), labels
    )

    if commitinfo is None:
        commitinfo = {}

    for f, op in pycompat.iteritems(commitinfo):
        # the other side of filenode was choosen while merging, store this in
        # mergestate so that it can be reused on commit
        if op == b'other':
            ms.addmergedother(f)

    moves = []

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = list(
        mresult.getactions(
            [
                mergestatemod.ACTION_CHANGED_DELETED,
                mergestatemod.ACTION_DELETED_CHANGED,
                mergestatemod.ACTION_MERGE,
            ],
            sort=True,
        )
    )
    # register every conflicting file with the merge state before any
    # working-directory file is touched, so the originals are preserved
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == b'.hgsubstate':  # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug(b"removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    # ACTION_KEEP is a no-op, so it does not count toward progress
    numupdates = mresult.len() - mresult.len((mergestatemod.ACTION_KEEP,))
    progress = repo.ui.makeprogress(
        _(b'updating'), unit=_(b'files'), total=numupdates
    )

    if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in mresult.getactions(
        [mergestatemod.ACTION_PATH_CONFLICT], sort=True
    ):
        f1, fo = args
        s = repo.ui.status
        s(
            _(
                b"%s: path conflict - a file or link has the same name as a "
                b"directory\n"
            )
            % f
        )
        if fo == b'l':
            s(_(b"the local file has been renamed to %s\n") % f1)
        else:
            s(_(b"the remote file has been renamed to %s\n") % f1)
        s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpathconflict(f, f1, fo)
        progress.increment(item=f)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(
        repo.ui,
        cost,
        batchremove,
        (repo, wctx),
        list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
    )
    for i, item in prog:
        progress.increment(step=i, item=item)
    removed = mresult.len((mergestatemod.ACTION_REMOVE,))

    # resolve path conflicts (must come before getting)
    for f, args, msg in mresult.getactions(
        [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
    ):
        repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
        (f0, origf0) = args
        if wctx[f0].lexists():
            repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        progress.increment(item=f)

    # get in parallel.
    threadsafe = repo.ui.configbool(
        b'experimental', b'worker.wdir-get-thread-safe'
    )
    prog = worker.worker(
        repo.ui,
        cost,
        batchget,
        (repo, mctx, wctx, wantfiledata),
        list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
        threadsafe=threadsafe,
        hasretval=True,
    )
    getfiledata = {}
    for final, res in prog:
        # hasretval=True: the last item carries the accumulated return
        # value; every other item is a (step, item) progress update
        if final:
            getfiledata = res
        else:
            i, item = res
            progress.increment(step=i, item=item)

    if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_FORGET,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
        progress.increment(item=f)

    # re-add (manifest only, just log it)
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_ADD,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
        progress.increment(item=f)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
        progress.increment(item=f)

    # keep (noop, just log it)
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_KEEP,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()

    # local directory rename, get
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)

    # exec
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_EXEC,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
        progress.increment(item=f)
        (flags,) = args
        wctx[f].audit()
        wctx[f].setflags(b'l' in flags, b'x' in flags)

    # these actions updates the file
    updated = mresult.len(
        (
            mergestatemod.ACTION_GET,
            mergestatemod.ACTION_EXEC,
            mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
            mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
        )
    )
    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError(
                b"in-memory merge does not support mergedriver"
            )
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updateresult(
                updated, merged, removed, max(len(unresolvedf), 1)
            )
        # only the files the driver left unresolved still need merging
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
            progress.increment(item=f)
            if f == b'.hgsubstate':  # subrepo states need updating
                subrepoutil.submerge(
                    repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
                )
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                # premerge could not finish it; count the extra merge pass
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
            progress.increment(item=f, total=numupdates)
            ms.resolve(f, wctx)

    finally:
        # persist the merge state even if a resolve step raised
        ms.commit()

    unresolved = ms.unresolvedcount()

    if (
        usemergedriver
        and not unresolved
        and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
    ):
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

    ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = {
            a[0] for a in mresult.getactions((mergestatemod.ACTION_MERGE,))
        }
        for k, acts in pycompat.iteritems(extraactions):
            for a in acts:
                mresult.addfile(a[0], k, *a[1:])
            if k == mergestatemod.ACTION_GET and wantfiledata:
                # no filedata until mergestate is updated to provide it
                for a in acts:
                    getfiledata[a[0]] = None
            # Remove these files from actions[ACTION_MERGE] as well. This is
            # important because in recordupdates, files in actions[ACTION_MERGE]
            # are processed after files in other actions, and the merge driver
            # might add files to those actions via extraactions above. This can
            # lead to a file being recorded twice, with poor results. This is
            # especially problematic for actions[ACTION_REMOVE] (currently only
            # possible with the merge driver in the initial merge process;
            # interrupted merges don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        for a in list(mresult.getactions((mergestatemod.ACTION_MERGE,))):
            if a[0] not in mfiles:
                mresult.removefile(a[0])

    progress.complete()
    assert len(getfiledata) == (
        mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
    )
    return updateresult(updated, merged, removed, unresolved), getfiledata
1708
1685
1709
1686
def _advertisefsmonitor(repo, num_gets, p1node):
    """Possibly warn the user that enabling fsmonitor would help.

    ``num_gets`` is compared against the ``fsmonitor.warn_update_file_count``
    threshold (presumably the number of files being fetched into the working
    directory -- confirm against callers); ``p1node`` is checked against
    ``nullid``, i.e. the warning only fires when updating from an empty
    working directory.
    """
    # Advertise fsmonitor when its presence could be useful.
    #
    # We only advertise when performing an update from an empty working
    # directory. This typically only occurs during initial clone.
    #
    # We give users a mechanism to disable the warning in case it is
    # annoying.
    #
    # We only allow on Linux and MacOS because that's where fsmonitor is
    # considered stable.
    fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
    fsmonitorthreshold = repo.ui.configint(
        b'fsmonitor', b'warn_update_file_count'
    )
    try:
        # avoid cycle: extensions -> cmdutil -> merge
        from . import extensions

        # raises KeyError if the extension is not loaded
        extensions.find(b'fsmonitor')
        fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
        # We intentionally don't look at whether fsmonitor has disabled
        # itself because a) fsmonitor may have already printed a warning
        # b) we only care about the config state here.
    except KeyError:
        fsmonitorenabled = False

    if (
        fsmonitorwarning
        and not fsmonitorenabled
        and p1node == nullid
        and num_gets >= fsmonitorthreshold
        and pycompat.sysplatform.startswith((b'linux', b'darwin'))
    ):
        repo.ui.warn(
            _(
                b'(warning: large working directory being used without '
                b'fsmonitor enabled; enable fsmonitor to improve performance; '
                b'see "hg help -e fsmonitor")\n'
            )
        )
1751
1728
1752
1729
# Policy values accepted by the ``updatecheck`` argument of update():
# how to react when the working directory is dirty and the requested
# update is not a simple linear move.
UPDATECHECK_ABORT = b'abort'  # handled at higher layers
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'
1757
1734
1758
1735
1759 def update(
1736 def update(
1760 repo,
1737 repo,
1761 node,
1738 node,
1762 branchmerge,
1739 branchmerge,
1763 force,
1740 force,
1764 ancestor=None,
1741 ancestor=None,
1765 mergeancestor=False,
1742 mergeancestor=False,
1766 labels=None,
1743 labels=None,
1767 matcher=None,
1744 matcher=None,
1768 mergeforce=False,
1745 mergeforce=False,
1769 updatedirstate=True,
1746 updatedirstate=True,
1770 updatecheck=None,
1747 updatecheck=None,
1771 wc=None,
1748 wc=None,
1772 ):
1749 ):
1773 """
1750 """
1774 Perform a merge between the working directory and the given node
1751 Perform a merge between the working directory and the given node
1775
1752
1776 node = the node to update to
1753 node = the node to update to
1777 branchmerge = whether to merge between branches
1754 branchmerge = whether to merge between branches
1778 force = whether to force branch merging or file overwriting
1755 force = whether to force branch merging or file overwriting
1779 matcher = a matcher to filter file lists (dirstate not updated)
1756 matcher = a matcher to filter file lists (dirstate not updated)
1780 mergeancestor = whether it is merging with an ancestor. If true,
1757 mergeancestor = whether it is merging with an ancestor. If true,
1781 we should accept the incoming changes for any prompts that occur.
1758 we should accept the incoming changes for any prompts that occur.
1782 If false, merging with an ancestor (fast-forward) is only allowed
1759 If false, merging with an ancestor (fast-forward) is only allowed
1783 between different named branches. This flag is used by rebase extension
1760 between different named branches. This flag is used by rebase extension
1784 as a temporary fix and should be avoided in general.
1761 as a temporary fix and should be avoided in general.
1785 labels = labels to use for base, local and other
1762 labels = labels to use for base, local and other
1786 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1763 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1787 this is True, then 'force' should be True as well.
1764 this is True, then 'force' should be True as well.
1788
1765
1789 The table below shows all the behaviors of the update command given the
1766 The table below shows all the behaviors of the update command given the
1790 -c/--check and -C/--clean or no options, whether the working directory is
1767 -c/--check and -C/--clean or no options, whether the working directory is
1791 dirty, whether a revision is specified, and the relationship of the parent
1768 dirty, whether a revision is specified, and the relationship of the parent
1792 rev to the target rev (linear or not). Match from top first. The -n
1769 rev to the target rev (linear or not). Match from top first. The -n
1793 option doesn't exist on the command line, but represents the
1770 option doesn't exist on the command line, but represents the
1794 experimental.updatecheck=noconflict option.
1771 experimental.updatecheck=noconflict option.
1795
1772
1796 This logic is tested by test-update-branches.t.
1773 This logic is tested by test-update-branches.t.
1797
1774
1798 -c -C -n -m dirty rev linear | result
1775 -c -C -n -m dirty rev linear | result
1799 y y * * * * * | (1)
1776 y y * * * * * | (1)
1800 y * y * * * * | (1)
1777 y * y * * * * | (1)
1801 y * * y * * * | (1)
1778 y * * y * * * | (1)
1802 * y y * * * * | (1)
1779 * y y * * * * | (1)
1803 * y * y * * * | (1)
1780 * y * y * * * | (1)
1804 * * y y * * * | (1)
1781 * * y y * * * | (1)
1805 * * * * * n n | x
1782 * * * * * n n | x
1806 * * * * n * * | ok
1783 * * * * n * * | ok
1807 n n n n y * y | merge
1784 n n n n y * y | merge
1808 n n n n y y n | (2)
1785 n n n n y y n | (2)
1809 n n n y y * * | merge
1786 n n n y y * * | merge
1810 n n y n y * * | merge if no conflict
1787 n n y n y * * | merge if no conflict
1811 n y n n y * * | discard
1788 n y n n y * * | discard
1812 y n n n y * * | (3)
1789 y n n n y * * | (3)
1813
1790
1814 x = can't happen
1791 x = can't happen
1815 * = don't-care
1792 * = don't-care
1816 1 = incompatible options (checked in commands.py)
1793 1 = incompatible options (checked in commands.py)
1817 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1794 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1818 3 = abort: uncommitted changes (checked in commands.py)
1795 3 = abort: uncommitted changes (checked in commands.py)
1819
1796
1820 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
1797 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
1821 to repo[None] if None is passed.
1798 to repo[None] if None is passed.
1822
1799
1823 Return the same tuple as applyupdates().
1800 Return the same tuple as applyupdates().
1824 """
1801 """
1825 # Avoid cycle.
1802 # Avoid cycle.
1826 from . import sparse
1803 from . import sparse
1827
1804
1828 # This function used to find the default destination if node was None, but
1805 # This function used to find the default destination if node was None, but
1829 # that's now in destutil.py.
1806 # that's now in destutil.py.
1830 assert node is not None
1807 assert node is not None
1831 if not branchmerge and not force:
1808 if not branchmerge and not force:
1832 # TODO: remove the default once all callers that pass branchmerge=False
1809 # TODO: remove the default once all callers that pass branchmerge=False
1833 # and force=False pass a value for updatecheck. We may want to allow
1810 # and force=False pass a value for updatecheck. We may want to allow
1834 # updatecheck='abort' to better suppport some of these callers.
1811 # updatecheck='abort' to better suppport some of these callers.
1835 if updatecheck is None:
1812 if updatecheck is None:
1836 updatecheck = UPDATECHECK_LINEAR
1813 updatecheck = UPDATECHECK_LINEAR
1837 if updatecheck not in (
1814 if updatecheck not in (
1838 UPDATECHECK_NONE,
1815 UPDATECHECK_NONE,
1839 UPDATECHECK_LINEAR,
1816 UPDATECHECK_LINEAR,
1840 UPDATECHECK_NO_CONFLICT,
1817 UPDATECHECK_NO_CONFLICT,
1841 ):
1818 ):
1842 raise ValueError(
1819 raise ValueError(
1843 r'Invalid updatecheck %r (can accept %r)'
1820 r'Invalid updatecheck %r (can accept %r)'
1844 % (
1821 % (
1845 updatecheck,
1822 updatecheck,
1846 (
1823 (
1847 UPDATECHECK_NONE,
1824 UPDATECHECK_NONE,
1848 UPDATECHECK_LINEAR,
1825 UPDATECHECK_LINEAR,
1849 UPDATECHECK_NO_CONFLICT,
1826 UPDATECHECK_NO_CONFLICT,
1850 ),
1827 ),
1851 )
1828 )
1852 )
1829 )
1853 if wc is not None and wc.isinmemory():
1830 if wc is not None and wc.isinmemory():
1854 maybe_wlock = util.nullcontextmanager()
1831 maybe_wlock = util.nullcontextmanager()
1855 else:
1832 else:
1856 maybe_wlock = repo.wlock()
1833 maybe_wlock = repo.wlock()
1857 with maybe_wlock:
1834 with maybe_wlock:
1858 if wc is None:
1835 if wc is None:
1859 wc = repo[None]
1836 wc = repo[None]
1860 pl = wc.parents()
1837 pl = wc.parents()
1861 p1 = pl[0]
1838 p1 = pl[0]
1862 p2 = repo[node]
1839 p2 = repo[node]
1863 if ancestor is not None:
1840 if ancestor is not None:
1864 pas = [repo[ancestor]]
1841 pas = [repo[ancestor]]
1865 else:
1842 else:
1866 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1843 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1867 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1844 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1868 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1845 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1869 else:
1846 else:
1870 pas = [p1.ancestor(p2, warn=branchmerge)]
1847 pas = [p1.ancestor(p2, warn=branchmerge)]
1871
1848
1872 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1849 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1873
1850
1874 overwrite = force and not branchmerge
1851 overwrite = force and not branchmerge
1875 ### check phase
1852 ### check phase
1876 if not overwrite:
1853 if not overwrite:
1877 if len(pl) > 1:
1854 if len(pl) > 1:
1878 raise error.Abort(_(b"outstanding uncommitted merge"))
1855 raise error.Abort(_(b"outstanding uncommitted merge"))
1879 ms = mergestatemod.mergestate.read(repo)
1856 ms = mergestatemod.mergestate.read(repo)
1880 if list(ms.unresolved()):
1857 if list(ms.unresolved()):
1881 raise error.Abort(
1858 raise error.Abort(
1882 _(b"outstanding merge conflicts"),
1859 _(b"outstanding merge conflicts"),
1883 hint=_(b"use 'hg resolve' to resolve"),
1860 hint=_(b"use 'hg resolve' to resolve"),
1884 )
1861 )
1885 if branchmerge:
1862 if branchmerge:
1886 if pas == [p2]:
1863 if pas == [p2]:
1887 raise error.Abort(
1864 raise error.Abort(
1888 _(
1865 _(
1889 b"merging with a working directory ancestor"
1866 b"merging with a working directory ancestor"
1890 b" has no effect"
1867 b" has no effect"
1891 )
1868 )
1892 )
1869 )
1893 elif pas == [p1]:
1870 elif pas == [p1]:
1894 if not mergeancestor and wc.branch() == p2.branch():
1871 if not mergeancestor and wc.branch() == p2.branch():
1895 raise error.Abort(
1872 raise error.Abort(
1896 _(b"nothing to merge"),
1873 _(b"nothing to merge"),
1897 hint=_(b"use 'hg update' or check 'hg heads'"),
1874 hint=_(b"use 'hg update' or check 'hg heads'"),
1898 )
1875 )
1899 if not force and (wc.files() or wc.deleted()):
1876 if not force and (wc.files() or wc.deleted()):
1900 raise error.Abort(
1877 raise error.Abort(
1901 _(b"uncommitted changes"),
1878 _(b"uncommitted changes"),
1902 hint=_(b"use 'hg status' to list changes"),
1879 hint=_(b"use 'hg status' to list changes"),
1903 )
1880 )
1904 if not wc.isinmemory():
1881 if not wc.isinmemory():
1905 for s in sorted(wc.substate):
1882 for s in sorted(wc.substate):
1906 wc.sub(s).bailifchanged()
1883 wc.sub(s).bailifchanged()
1907
1884
1908 elif not overwrite:
1885 elif not overwrite:
1909 if p1 == p2: # no-op update
1886 if p1 == p2: # no-op update
1910 # call the hooks and exit early
1887 # call the hooks and exit early
1911 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1888 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1912 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1889 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1913 return updateresult(0, 0, 0, 0)
1890 return updateresult(0, 0, 0, 0)
1914
1891
1915 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1892 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1916 [p1],
1893 [p1],
1917 [p2],
1894 [p2],
1918 ): # nonlinear
1895 ): # nonlinear
1919 dirty = wc.dirty(missing=True)
1896 dirty = wc.dirty(missing=True)
1920 if dirty:
1897 if dirty:
1921 # Branching is a bit strange to ensure we do the minimal
1898 # Branching is a bit strange to ensure we do the minimal
1922 # amount of call to obsutil.foreground.
1899 # amount of call to obsutil.foreground.
1923 foreground = obsutil.foreground(repo, [p1.node()])
1900 foreground = obsutil.foreground(repo, [p1.node()])
1924 # note: the <node> variable contains a random identifier
1901 # note: the <node> variable contains a random identifier
1925 if repo[node].node() in foreground:
1902 if repo[node].node() in foreground:
1926 pass # allow updating to successors
1903 pass # allow updating to successors
1927 else:
1904 else:
1928 msg = _(b"uncommitted changes")
1905 msg = _(b"uncommitted changes")
1929 hint = _(b"commit or update --clean to discard changes")
1906 hint = _(b"commit or update --clean to discard changes")
1930 raise error.UpdateAbort(msg, hint=hint)
1907 raise error.UpdateAbort(msg, hint=hint)
1931 else:
1908 else:
1932 # Allow jumping branches if clean and specific rev given
1909 # Allow jumping branches if clean and specific rev given
1933 pass
1910 pass
1934
1911
1935 if overwrite:
1912 if overwrite:
1936 pas = [wc]
1913 pas = [wc]
1937 elif not branchmerge:
1914 elif not branchmerge:
1938 pas = [p1]
1915 pas = [p1]
1939
1916
1940 # deprecated config: merge.followcopies
1917 # deprecated config: merge.followcopies
1941 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1918 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1942 if overwrite:
1919 if overwrite:
1943 followcopies = False
1920 followcopies = False
1944 elif not pas[0]:
1921 elif not pas[0]:
1945 followcopies = False
1922 followcopies = False
1946 if not branchmerge and not wc.dirty(missing=True):
1923 if not branchmerge and not wc.dirty(missing=True):
1947 followcopies = False
1924 followcopies = False
1948
1925
1949 ### calculate phase
1926 ### calculate phase
1950 mresult = calculateupdates(
1927 mresult = calculateupdates(
1951 repo,
1928 repo,
1952 wc,
1929 wc,
1953 p2,
1930 p2,
1954 pas,
1931 pas,
1955 branchmerge,
1932 branchmerge,
1956 force,
1933 force,
1957 mergeancestor,
1934 mergeancestor,
1958 followcopies,
1935 followcopies,
1959 matcher=matcher,
1936 matcher=matcher,
1960 mergeforce=mergeforce,
1937 mergeforce=mergeforce,
1961 )
1938 )
1962
1939
1963 if updatecheck == UPDATECHECK_NO_CONFLICT:
1940 if updatecheck == UPDATECHECK_NO_CONFLICT:
1964 if mresult.hasconflicts():
1941 if mresult.hasconflicts():
1965 msg = _(b"conflicting changes")
1942 msg = _(b"conflicting changes")
1966 hint = _(b"commit or update --clean to discard changes")
1943 hint = _(b"commit or update --clean to discard changes")
1967 raise error.Abort(msg, hint=hint)
1944 raise error.Abort(msg, hint=hint)
1968
1945
1969 # Prompt and create actions. Most of this is in the resolve phase
1946 # Prompt and create actions. Most of this is in the resolve phase
1970 # already, but we can't handle .hgsubstate in filemerge or
1947 # already, but we can't handle .hgsubstate in filemerge or
1971 # subrepoutil.submerge yet so we have to keep prompting for it.
1948 # subrepoutil.submerge yet so we have to keep prompting for it.
1972 vals = mresult.getfile(b'.hgsubstate')
1949 vals = mresult.getfile(b'.hgsubstate')
1973 if vals:
1950 if vals:
1974 f = b'.hgsubstate'
1951 f = b'.hgsubstate'
1975 m, args, msg = vals
1952 m, args, msg = vals
1976 prompts = filemerge.partextras(labels)
1953 prompts = filemerge.partextras(labels)
1977 prompts[b'f'] = f
1954 prompts[b'f'] = f
1978 if m == mergestatemod.ACTION_CHANGED_DELETED:
1955 if m == mergestatemod.ACTION_CHANGED_DELETED:
1979 if repo.ui.promptchoice(
1956 if repo.ui.promptchoice(
1980 _(
1957 _(
1981 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1958 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1982 b"use (c)hanged version or (d)elete?"
1959 b"use (c)hanged version or (d)elete?"
1983 b"$$ &Changed $$ &Delete"
1960 b"$$ &Changed $$ &Delete"
1984 )
1961 )
1985 % prompts,
1962 % prompts,
1986 0,
1963 0,
1987 ):
1964 ):
1988 mresult.addfile(
1965 mresult.addfile(
1989 f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
1966 f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
1990 )
1967 )
1991 elif f in p1:
1968 elif f in p1:
1992 mresult.addfile(
1969 mresult.addfile(
1993 f,
1970 f,
1994 mergestatemod.ACTION_ADD_MODIFIED,
1971 mergestatemod.ACTION_ADD_MODIFIED,
1995 None,
1972 None,
1996 b'prompt keep',
1973 b'prompt keep',
1997 )
1974 )
1998 else:
1975 else:
1999 mresult.addfile(
1976 mresult.addfile(
2000 f, mergestatemod.ACTION_ADD, None, b'prompt keep',
1977 f, mergestatemod.ACTION_ADD, None, b'prompt keep',
2001 )
1978 )
2002 elif m == mergestatemod.ACTION_DELETED_CHANGED:
1979 elif m == mergestatemod.ACTION_DELETED_CHANGED:
2003 f1, f2, fa, move, anc = args
1980 f1, f2, fa, move, anc = args
2004 flags = p2[f2].flags()
1981 flags = p2[f2].flags()
2005 if (
1982 if (
2006 repo.ui.promptchoice(
1983 repo.ui.promptchoice(
2007 _(
1984 _(
2008 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
1985 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2009 b"use (c)hanged version or leave (d)eleted?"
1986 b"use (c)hanged version or leave (d)eleted?"
2010 b"$$ &Changed $$ &Deleted"
1987 b"$$ &Changed $$ &Deleted"
2011 )
1988 )
2012 % prompts,
1989 % prompts,
2013 0,
1990 0,
2014 )
1991 )
2015 == 0
1992 == 0
2016 ):
1993 ):
2017 mresult.addfile(
1994 mresult.addfile(
2018 f,
1995 f,
2019 mergestatemod.ACTION_GET,
1996 mergestatemod.ACTION_GET,
2020 (flags, False),
1997 (flags, False),
2021 b'prompt recreating',
1998 b'prompt recreating',
2022 )
1999 )
2023 else:
2000 else:
2024 mresult.removefile(f)
2001 mresult.removefile(f)
2025
2002
2026 if not util.fscasesensitive(repo.path):
2003 if not util.fscasesensitive(repo.path):
2027 # check collision between files only in p2 for clean update
2004 # check collision between files only in p2 for clean update
2028 if not branchmerge and (
2005 if not branchmerge and (
2029 force or not wc.dirty(missing=True, branch=False)
2006 force or not wc.dirty(missing=True, branch=False)
2030 ):
2007 ):
2031 _checkcollision(repo, p2.manifest(), None)
2008 _checkcollision(repo, p2.manifest(), None)
2032 else:
2009 else:
2033 _checkcollision(repo, wc.manifest(), mresult)
2010 _checkcollision(repo, wc.manifest(), mresult)
2034
2011
2035 # divergent renames
2012 # divergent renames
2036 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
2013 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
2037 repo.ui.warn(
2014 repo.ui.warn(
2038 _(
2015 _(
2039 b"note: possible conflict - %s was renamed "
2016 b"note: possible conflict - %s was renamed "
2040 b"multiple times to:\n"
2017 b"multiple times to:\n"
2041 )
2018 )
2042 % f
2019 % f
2043 )
2020 )
2044 for nf in sorted(fl):
2021 for nf in sorted(fl):
2045 repo.ui.warn(b" %s\n" % nf)
2022 repo.ui.warn(b" %s\n" % nf)
2046
2023
2047 # rename and delete
2024 # rename and delete
2048 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
2025 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
2049 repo.ui.warn(
2026 repo.ui.warn(
2050 _(
2027 _(
2051 b"note: possible conflict - %s was deleted "
2028 b"note: possible conflict - %s was deleted "
2052 b"and renamed to:\n"
2029 b"and renamed to:\n"
2053 )
2030 )
2054 % f
2031 % f
2055 )
2032 )
2056 for nf in sorted(fl):
2033 for nf in sorted(fl):
2057 repo.ui.warn(b" %s\n" % nf)
2034 repo.ui.warn(b" %s\n" % nf)
2058
2035
2059 ### apply phase
2036 ### apply phase
2060 if not branchmerge: # just jump to the new rev
2037 if not branchmerge: # just jump to the new rev
2061 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2038 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2062 # If we're doing a partial update, we need to skip updating
2039 # If we're doing a partial update, we need to skip updating
2063 # the dirstate.
2040 # the dirstate.
2064 always = matcher is None or matcher.always()
2041 always = matcher is None or matcher.always()
2065 updatedirstate = updatedirstate and always and not wc.isinmemory()
2042 updatedirstate = updatedirstate and always and not wc.isinmemory()
2066 if updatedirstate:
2043 if updatedirstate:
2067 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2044 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2068 # note that we're in the middle of an update
2045 # note that we're in the middle of an update
2069 repo.vfs.write(b'updatestate', p2.hex())
2046 repo.vfs.write(b'updatestate', p2.hex())
2070
2047
2071 _advertisefsmonitor(
2048 _advertisefsmonitor(
2072 repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
2049 repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
2073 )
2050 )
2074
2051
2075 wantfiledata = updatedirstate and not branchmerge
2052 wantfiledata = updatedirstate and not branchmerge
2076 stats, getfiledata = applyupdates(
2053 stats, getfiledata = applyupdates(
2077 repo,
2054 repo,
2078 mresult,
2055 mresult,
2079 wc,
2056 wc,
2080 p2,
2057 p2,
2081 overwrite,
2058 overwrite,
2082 wantfiledata,
2059 wantfiledata,
2083 labels=labels,
2060 labels=labels,
2084 commitinfo=mresult.commitinfo,
2061 commitinfo=mresult.commitinfo,
2085 )
2062 )
2086
2063
2087 if updatedirstate:
2064 if updatedirstate:
2088 with repo.dirstate.parentchange():
2065 with repo.dirstate.parentchange():
2089 repo.setparents(fp1, fp2)
2066 repo.setparents(fp1, fp2)
2090 mergestatemod.recordupdates(
2067 mergestatemod.recordupdates(
2091 repo, mresult.actionsdict, branchmerge, getfiledata
2068 repo, mresult.actionsdict, branchmerge, getfiledata
2092 )
2069 )
2093 # update completed, clear state
2070 # update completed, clear state
2094 util.unlink(repo.vfs.join(b'updatestate'))
2071 util.unlink(repo.vfs.join(b'updatestate'))
2095
2072
2096 if not branchmerge:
2073 if not branchmerge:
2097 repo.dirstate.setbranch(p2.branch())
2074 repo.dirstate.setbranch(p2.branch())
2098
2075
2099 # If we're updating to a location, clean up any stale temporary includes
2076 # If we're updating to a location, clean up any stale temporary includes
2100 # (ex: this happens during hg rebase --abort).
2077 # (ex: this happens during hg rebase --abort).
2101 if not branchmerge:
2078 if not branchmerge:
2102 sparse.prunetemporaryincludes(repo)
2079 sparse.prunetemporaryincludes(repo)
2103
2080
2104 if updatedirstate:
2081 if updatedirstate:
2105 repo.hook(
2082 repo.hook(
2106 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2083 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2107 )
2084 )
2108 return stats
2085 return stats
2109
2086
2110
2087
2111 def merge(ctx, labels=None, force=False, wc=None):
2088 def merge(ctx, labels=None, force=False, wc=None):
2112 """Merge another topological branch into the working copy.
2089 """Merge another topological branch into the working copy.
2113
2090
2114 force = whether the merge was run with 'merge --force' (deprecated)
2091 force = whether the merge was run with 'merge --force' (deprecated)
2115 """
2092 """
2116
2093
2117 return update(
2094 return update(
2118 ctx.repo(),
2095 ctx.repo(),
2119 ctx.rev(),
2096 ctx.rev(),
2120 labels=labels,
2097 labels=labels,
2121 branchmerge=True,
2098 branchmerge=True,
2122 force=force,
2099 force=force,
2123 mergeforce=force,
2100 mergeforce=force,
2124 wc=wc,
2101 wc=wc,
2125 )
2102 )
2126
2103
2127
2104
2128 def clean_update(ctx, wc=None):
2105 def clean_update(ctx, wc=None):
2129 """Do a clean update to the given commit.
2106 """Do a clean update to the given commit.
2130
2107
2131 This involves updating to the commit and discarding any changes in the
2108 This involves updating to the commit and discarding any changes in the
2132 working copy.
2109 working copy.
2133 """
2110 """
2134 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2111 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2135
2112
2136
2113
2137 def revert_to(ctx, matcher=None, wc=None):
2114 def revert_to(ctx, matcher=None, wc=None):
2138 """Revert the working copy to the given commit.
2115 """Revert the working copy to the given commit.
2139
2116
2140 The working copy will keep its current parent(s) but its content will
2117 The working copy will keep its current parent(s) but its content will
2141 be the same as in the given commit.
2118 be the same as in the given commit.
2142 """
2119 """
2143
2120
2144 return update(
2121 return update(
2145 ctx.repo(),
2122 ctx.repo(),
2146 ctx.rev(),
2123 ctx.rev(),
2147 branchmerge=False,
2124 branchmerge=False,
2148 force=True,
2125 force=True,
2149 updatedirstate=False,
2126 updatedirstate=False,
2150 matcher=matcher,
2127 matcher=matcher,
2151 wc=wc,
2128 wc=wc,
2152 )
2129 )
2153
2130
2154
2131
2155 def graft(
2132 def graft(
2156 repo,
2133 repo,
2157 ctx,
2134 ctx,
2158 base=None,
2135 base=None,
2159 labels=None,
2136 labels=None,
2160 keepparent=False,
2137 keepparent=False,
2161 keepconflictparent=False,
2138 keepconflictparent=False,
2162 wctx=None,
2139 wctx=None,
2163 ):
2140 ):
2164 """Do a graft-like merge.
2141 """Do a graft-like merge.
2165
2142
2166 This is a merge where the merge ancestor is chosen such that one
2143 This is a merge where the merge ancestor is chosen such that one
2167 or more changesets are grafted onto the current changeset. In
2144 or more changesets are grafted onto the current changeset. In
2168 addition to the merge, this fixes up the dirstate to include only
2145 addition to the merge, this fixes up the dirstate to include only
2169 a single parent (if keepparent is False) and tries to duplicate any
2146 a single parent (if keepparent is False) and tries to duplicate any
2170 renames/copies appropriately.
2147 renames/copies appropriately.
2171
2148
2172 ctx - changeset to rebase
2149 ctx - changeset to rebase
2173 base - merge base, or ctx.p1() if not specified
2150 base - merge base, or ctx.p1() if not specified
2174 labels - merge labels eg ['local', 'graft']
2151 labels - merge labels eg ['local', 'graft']
2175 keepparent - keep second parent if any
2152 keepparent - keep second parent if any
2176 keepconflictparent - if unresolved, keep parent used for the merge
2153 keepconflictparent - if unresolved, keep parent used for the merge
2177
2154
2178 """
2155 """
2179 # If we're grafting a descendant onto an ancestor, be sure to pass
2156 # If we're grafting a descendant onto an ancestor, be sure to pass
2180 # mergeancestor=True to update. This does two things: 1) allows the merge if
2157 # mergeancestor=True to update. This does two things: 1) allows the merge if
2181 # the destination is the same as the parent of the ctx (so we can use graft
2158 # the destination is the same as the parent of the ctx (so we can use graft
2182 # to copy commits), and 2) informs update that the incoming changes are
2159 # to copy commits), and 2) informs update that the incoming changes are
2183 # newer than the destination so it doesn't prompt about "remote changed foo
2160 # newer than the destination so it doesn't prompt about "remote changed foo
2184 # which local deleted".
2161 # which local deleted".
2185 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2162 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2186 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2163 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2187 wctx = wctx or repo[None]
2164 wctx = wctx or repo[None]
2188 pctx = wctx.p1()
2165 pctx = wctx.p1()
2189 base = base or ctx.p1()
2166 base = base or ctx.p1()
2190 mergeancestor = (
2167 mergeancestor = (
2191 repo.changelog.isancestor(pctx.node(), ctx.node())
2168 repo.changelog.isancestor(pctx.node(), ctx.node())
2192 or pctx.rev() == base.rev()
2169 or pctx.rev() == base.rev()
2193 )
2170 )
2194
2171
2195 stats = update(
2172 stats = update(
2196 repo,
2173 repo,
2197 ctx.node(),
2174 ctx.node(),
2198 True,
2175 True,
2199 True,
2176 True,
2200 base.node(),
2177 base.node(),
2201 mergeancestor=mergeancestor,
2178 mergeancestor=mergeancestor,
2202 labels=labels,
2179 labels=labels,
2203 wc=wctx,
2180 wc=wctx,
2204 )
2181 )
2205
2182
2206 if keepconflictparent and stats.unresolvedcount:
2183 if keepconflictparent and stats.unresolvedcount:
2207 pother = ctx.node()
2184 pother = ctx.node()
2208 else:
2185 else:
2209 pother = nullid
2186 pother = nullid
2210 parents = ctx.parents()
2187 parents = ctx.parents()
2211 if keepparent and len(parents) == 2 and base in parents:
2188 if keepparent and len(parents) == 2 and base in parents:
2212 parents.remove(base)
2189 parents.remove(base)
2213 pother = parents[0].node()
2190 pother = parents[0].node()
2214 # Never set both parents equal to each other
2191 # Never set both parents equal to each other
2215 if pother == pctx.node():
2192 if pother == pctx.node():
2216 pother = nullid
2193 pother = nullid
2217
2194
2218 if wctx.isinmemory():
2195 if wctx.isinmemory():
2219 wctx.setparents(pctx.node(), pother)
2196 wctx.setparents(pctx.node(), pother)
2220 # fix up dirstate for copies and renames
2197 # fix up dirstate for copies and renames
2221 copies.graftcopies(wctx, ctx, base)
2198 copies.graftcopies(wctx, ctx, base)
2222 else:
2199 else:
2223 with repo.dirstate.parentchange():
2200 with repo.dirstate.parentchange():
2224 repo.setparents(pctx.node(), pother)
2201 repo.setparents(pctx.node(), pother)
2225 repo.dirstate.write(repo.currenttransaction())
2202 repo.dirstate.write(repo.currenttransaction())
2226 # fix up dirstate for copies and renames
2203 # fix up dirstate for copies and renames
2227 copies.graftcopies(wctx, ctx, base)
2204 copies.graftcopies(wctx, ctx, base)
2228 return stats
2205 return stats
2229
2206
2230
2207
2231 def purge(
2208 def purge(
2232 repo,
2209 repo,
2233 matcher,
2210 matcher,
2234 unknown=True,
2211 unknown=True,
2235 ignored=False,
2212 ignored=False,
2236 removeemptydirs=True,
2213 removeemptydirs=True,
2237 removefiles=True,
2214 removefiles=True,
2238 abortonerror=False,
2215 abortonerror=False,
2239 noop=False,
2216 noop=False,
2240 ):
2217 ):
2241 """Purge the working directory of untracked files.
2218 """Purge the working directory of untracked files.
2242
2219
2243 ``matcher`` is a matcher configured to scan the working directory -
2220 ``matcher`` is a matcher configured to scan the working directory -
2244 potentially a subset.
2221 potentially a subset.
2245
2222
2246 ``unknown`` controls whether unknown files should be purged.
2223 ``unknown`` controls whether unknown files should be purged.
2247
2224
2248 ``ignored`` controls whether ignored files should be purged.
2225 ``ignored`` controls whether ignored files should be purged.
2249
2226
2250 ``removeemptydirs`` controls whether empty directories should be removed.
2227 ``removeemptydirs`` controls whether empty directories should be removed.
2251
2228
2252 ``removefiles`` controls whether files are removed.
2229 ``removefiles`` controls whether files are removed.
2253
2230
2254 ``abortonerror`` causes an exception to be raised if an error occurs
2231 ``abortonerror`` causes an exception to be raised if an error occurs
2255 deleting a file or directory.
2232 deleting a file or directory.
2256
2233
2257 ``noop`` controls whether to actually remove files. If not defined, actions
2234 ``noop`` controls whether to actually remove files. If not defined, actions
2258 will be taken.
2235 will be taken.
2259
2236
2260 Returns an iterable of relative paths in the working directory that were
2237 Returns an iterable of relative paths in the working directory that were
2261 or would be removed.
2238 or would be removed.
2262 """
2239 """
2263
2240
2264 def remove(removefn, path):
2241 def remove(removefn, path):
2265 try:
2242 try:
2266 removefn(path)
2243 removefn(path)
2267 except OSError:
2244 except OSError:
2268 m = _(b'%s cannot be removed') % path
2245 m = _(b'%s cannot be removed') % path
2269 if abortonerror:
2246 if abortonerror:
2270 raise error.Abort(m)
2247 raise error.Abort(m)
2271 else:
2248 else:
2272 repo.ui.warn(_(b'warning: %s\n') % m)
2249 repo.ui.warn(_(b'warning: %s\n') % m)
2273
2250
2274 # There's no API to copy a matcher. So mutate the passed matcher and
2251 # There's no API to copy a matcher. So mutate the passed matcher and
2275 # restore it when we're done.
2252 # restore it when we're done.
2276 oldtraversedir = matcher.traversedir
2253 oldtraversedir = matcher.traversedir
2277
2254
2278 res = []
2255 res = []
2279
2256
2280 try:
2257 try:
2281 if removeemptydirs:
2258 if removeemptydirs:
2282 directories = []
2259 directories = []
2283 matcher.traversedir = directories.append
2260 matcher.traversedir = directories.append
2284
2261
2285 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2262 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2286
2263
2287 if removefiles:
2264 if removefiles:
2288 for f in sorted(status.unknown + status.ignored):
2265 for f in sorted(status.unknown + status.ignored):
2289 if not noop:
2266 if not noop:
2290 repo.ui.note(_(b'removing file %s\n') % f)
2267 repo.ui.note(_(b'removing file %s\n') % f)
2291 remove(repo.wvfs.unlink, f)
2268 remove(repo.wvfs.unlink, f)
2292 res.append(f)
2269 res.append(f)
2293
2270
2294 if removeemptydirs:
2271 if removeemptydirs:
2295 for f in sorted(directories, reverse=True):
2272 for f in sorted(directories, reverse=True):
2296 if matcher(f) and not repo.wvfs.listdir(f):
2273 if matcher(f) and not repo.wvfs.listdir(f):
2297 if not noop:
2274 if not noop:
2298 repo.ui.note(_(b'removing directory %s\n') % f)
2275 repo.ui.note(_(b'removing directory %s\n') % f)
2299 remove(repo.wvfs.rmdir, f)
2276 remove(repo.wvfs.rmdir, f)
2300 res.append(f)
2277 res.append(f)
2301
2278
2302 return res
2279 return res
2303
2280
2304 finally:
2281 finally:
2305 matcher.traversedir = oldtraversedir
2282 matcher.traversedir = oldtraversedir
General Comments 0
You need to be logged in to leave comments. Login now