large-files: use `running_status` in `mergeupdate`
marmoute
r51036:42288fa0 default
@@ -1,1904 +1,1904 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Overridden Mercurial commands and functions for the largefiles extension'''

import contextlib
import copy
import os

from mercurial.i18n import _

from mercurial.pycompat import open

from mercurial.hgweb import webcommands

from mercurial import (
    archival,
    cmdutil,
    copies as copiesmod,
    dirstate,
    error,
    exchange,
    extensions,
    exthelper,
    filemerge,
    hg,
    logcmdutil,
    match as matchmod,
    merge,
    mergestate as mergestatemod,
    pathutil,
    pycompat,
    scmutil,
    smartset,
    subrepo,
    url as urlmod,
    util,
)

from mercurial.upgrade_utils import (
    actions as upgrade_actions,
)

from . import (
    lfcommands,
    lfutil,
    storefactory,
)

ACTION_ADD = mergestatemod.ACTION_ADD
ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
ACTION_GET = mergestatemod.ACTION_GET
ACTION_KEEP = mergestatemod.ACTION_KEEP
ACTION_REMOVE = mergestatemod.ACTION_REMOVE

eh = exthelper.exthelper()

lfstatus = lfutil.lfstatus

MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')

# -- Utility functions: commonly/repeatedly needed functionality ---------------


def composelargefilematcher(match, manifest):
    """create a matcher that matches only the largefiles in the original
    matcher"""
    m = copy.copy(match)
    lfile = lambda f: lfutil.standin(f) in manifest
    m._files = [lf for lf in m._files if lfile(lf)]
    m._fileset = set(m._files)
    m.always = lambda: False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m


def composenormalfilematcher(match, manifest, exclude=None):
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)
    notlfile = lambda f: not (
        lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
    )
    m._files = [lf for lf in m._files if notlfile(lf)]
    m._fileset = set(m._files)
    m.always = lambda: False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m


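# Quick reference for the two helpers above (illustrative note): largefiles are
# tracked through small "standin" files kept under '.hglf/'.  lfutil.standin()
# maps a file name to its standin path, lfutil.isstandin() tests for the
# '.hglf/' prefix, and lfutil.splitstandin() strips it again, returning None
# for non-standins.  For example, lfutil.standin(b'data/big.bin') gives
# b'.hglf/data/big.bin', and lfutil.splitstandin() maps it back to
# b'data/big.bin'.

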
def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (
                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
            )
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                lfdirstate.set_tracked(f)
            lfdirstate.write(repo.currenttransaction())
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

    added = [f for f in lfnames if f not in bad]
    return added, bad


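# Note on the size threshold used above: 'lfsize' is expressed in megabytes
# (default 10, see the wrapped 'add' command below), so a file is picked up as
# a largefile once repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024, i.e.
# 10485760 bytes with the default setting.

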
def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    modified, added, deleted, clean = [
        [f for f in list if lfutil.standin(f) in manifest]
        for list in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(remove)

        for f in remove:
            lfdirstate.set_untracked(lfutil.splitstandin(f))

        lfdirstate.write(repo.currenttransaction())

    return result


# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
@eh.wrapfunction(webcommands, b'decodepath')
def decodepath(orig, path):
    return lfutil.splitstandin(path) or path


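# Note on decodepath() above: splitstandin() returns the original name for
# standin paths and None otherwise, so a manifest entry like
# b'.hglf/data/big.bin' is displayed by hgweb as b'data/big.bin', while any
# other path falls through the 'or path' branch unchanged.

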
# -- Wrappers: modify existing commands --------------------------------


@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    if opts.get('normal') and opts.get('large'):
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)


@eh.wrapfunction(cmdutil, b'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)

    bad.extend(f for f in lbad)
    return bad


@eh.wrapfunction(cmdutil, b'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    return (
        removelargefiles(
            ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
        )
        or result
    )


@eh.wrapfunction(dirstate.dirstate, b'_changing')
@contextlib.contextmanager
def _changing(orig, self, repo, change_type):
    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
    try:
        lfd = getattr(self, '_large_file_dirstate', False)
        if sub_dirstate is None and not lfd:
            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
            self._sub_dirstate = sub_dirstate
        if not lfd:
            assert self._sub_dirstate is not None
        with orig(self, repo, change_type):
            if sub_dirstate is None:
                yield
            else:
                with sub_dirstate._changing(repo, change_type):
                    yield
    finally:
        self._sub_dirstate = pre


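# Note on the _changing() wrapper above: it opens the largefiles dirstate and
# stashes it in self._sub_dirstate so that entering the main dirstate's
# 'changing' context also enters the matching context on the largefiles
# dirstate.  The '_large_file_dirstate' marker keeps the wrapper from opening
# yet another sub-dirstate when the wrapped dirstate is itself the largefiles
# one.

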
@eh.wrapfunction(subrepo.hgsubrepo, b'status')
def overridestatusfn(orig, repo, rev2, **opts):
    with lfstatus(repo._repo):
        return orig(repo, rev2, **opts)


@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    with lfstatus(repo):
        return orig(ui, repo, *pats, **opts)


@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    with lfstatus(repo._repo):
        return orig(repo, ignoreupdate=ignoreupdate, missing=missing)


@eh.wrapcommand(b'log')
def overridelog(orig, ui, repo, *pats, **opts):
    def overridematchandpats(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            if pat.startswith(b'set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + b':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        cwd = repo.getcwd()
        if cwd:
            hglf = lfutil.shortname
            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(cwd):
                    f = f[len(back) :]
                else:
                    f = cwd + b'/' + f
                return back + lfutil.standin(f)

        else:

            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)

        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == b'.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn

        def lfmatchfn(f):
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r

        m.matchfn = lfmatchfn

        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    oldmatchandpats = scmutil.matchandpats

    def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    wrappedmatchandpats = extensions.wrappedfunction(
        scmutil, b'matchandpats', overridematchandpats
    )
    wrappedmakefilematcher = extensions.wrappedfunction(
        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
    )
    with wrappedmatchandpats, wrappedmakefilematcher:
        return orig(ui, repo, *pats, **opts)


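# Note on the log wrapper above: overridematchandpats() widens the user's
# patterns so standins are searched as well.  For instance, a pattern such as
# b'glob:sub/*.bin' (run from the repository root) also yields
# b'glob:.hglf/sub/*.bin', and a directory that only exists under '.hglf/' is
# appended as its standin instead of replacing the original entry.

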
@eh.wrapcommand(
    b'verify',
    opts=[
        (
            b'',
            b'large',
            None,
            _(b'verify that all largefiles in current revision exists'),
        ),
        (
            b'',
            b'lfa',
            None,
            _(b'verify largefiles in all revisions, not just current'),
        ),
        (
            b'',
            b'lfc',
            None,
            _(b'verify local largefile contents, not just existence'),
        ),
    ],
)
def overrideverify(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result


@eh.wrapcommand(
    b'debugstate',
    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
)
def overridedebugstate(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    if large:

        class fakerepo:
            dirstate = lfutil.openlfdirstate(ui, repo)

        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)


# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
@eh.wrapfunction(merge, b'_checkunknownfile')
def overridecheckunknownfile(
    origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
):
    if lfutil.standin(dirstate.normalize(f)) in wctx:
        return False
    return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)


# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
@eh.wrapfunction(merge, b'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        return mresult

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))

        if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
            if sm == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(
                    lfile, ACTION_REMOVE, None, b'replaced by standin'
                )
                mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin,
                        ACTION_KEEP,
                        None,
                        b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin,
                        ACTION_REMOVE,
                        None,
                        b'replaced by non-standin',
                    )
        if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
            if lm == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile,
                        ACTION_KEEP,
                        None,
                        b'replaced by standin',
                    )
                    mresult.addfile(
                        standin, ACTION_KEEP, None, b'replaces standin'
                    )
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
                mresult.addfile(
                    standin,
                    ACTION_REMOVE,
                    None,
                    b'replaced by non-standin',
                )

    return mresult


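# A compact reading of the rewrites above, with the default prompt answer
# (keep the largefile version): case 1 becomes lfile -> ACTION_REMOVE plus
# standin -> ACTION_GET; case 2 on a branch merge becomes ACTION_KEEP for both
# entries, while a linear update instead marks the largefile with
# MERGE_ACTION_LARGEFILE_MARK_REMOVED and re-adds its standin with ACTION_ADD.

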
@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            repo.dirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)

    return orig(repo, actions, branchmerge, getfiledata)


# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, b'filemerge')
def overridefilemerge(
    origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)

    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
    if (
        ohash != ahash
        and ohash != dhash
        and (
            dhash == ahash
            or repo.ui.promptchoice(
                _(
                    b'largefile %s has a merge conflict\nancestor was %s\n'
                    b'you can keep (l)ocal %s or take (o)ther %s.\n'
                    b'what do you want to do?'
                    b'$$ &Local $$ &Other'
                )
                % (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0,
            )
            == 1
        )
    ):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0, False


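# Note on the hash comparison above: standins only record the largefile's hash,
# so the three readasstandin() values stand in for file contents.  If the other
# side did not change the hash, or both sides agree, nothing is written; if
# only the other side changed it (dhash == ahash) the other version is taken
# silently; a genuine divergence falls through to the (l)ocal/(o)ther prompt.

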
730 @eh.wrapfunction(copiesmod, b'pathcopies')
730 @eh.wrapfunction(copiesmod, b'pathcopies')
731 def copiespathcopies(orig, ctx1, ctx2, match=None):
731 def copiespathcopies(orig, ctx1, ctx2, match=None):
732 copies = orig(ctx1, ctx2, match=match)
732 copies = orig(ctx1, ctx2, match=match)
733 updated = {}
733 updated = {}
734
734
735 for k, v in copies.items():
735 for k, v in copies.items():
736 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
736 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
737
737
738 return updated
738 return updated
739
739
740
740
741 # Copy first changes the matchers to match standins instead of
741 # Copy first changes the matchers to match standins instead of
742 # largefiles. Then it overrides util.copyfile in that function it
742 # largefiles. Then it overrides util.copyfile in that function it
743 # checks if the destination largefile already exists. It also keeps a
743 # checks if the destination largefile already exists. It also keeps a
744 # list of copied files so that the largefiles can be copied and the
744 # list of copied files so that the largefiles can be copied and the
745 # dirstate updated.
745 # dirstate updated.
746 @eh.wrapfunction(cmdutil, b'copy')
746 @eh.wrapfunction(cmdutil, b'copy')
747 def overridecopy(orig, ui, repo, pats, opts, rename=False):
747 def overridecopy(orig, ui, repo, pats, opts, rename=False):
748 # doesn't remove largefile on rename
748 # doesn't remove largefile on rename
749 if len(pats) < 2:
749 if len(pats) < 2:
750 # this isn't legal, let the original function deal with it
750 # this isn't legal, let the original function deal with it
751 return orig(ui, repo, pats, opts, rename)
751 return orig(ui, repo, pats, opts, rename)
752
752
753 # This could copy both lfiles and normal files in one command,
753 # This could copy both lfiles and normal files in one command,
754 # but we don't want to do that. First replace their matcher to
754 # but we don't want to do that. First replace their matcher to
755 # only match normal files and run it, then replace it to just
755 # only match normal files and run it, then replace it to just
756 # match largefiles and run it again.
756 # match largefiles and run it again.
757 nonormalfiles = False
757 nonormalfiles = False
758 nolfiles = False
758 nolfiles = False
759 manifest = repo[None].manifest()
759 manifest = repo[None].manifest()
760
760
761 def normalfilesmatchfn(
761 def normalfilesmatchfn(
762 orig,
762 orig,
763 ctx,
763 ctx,
764 pats=(),
764 pats=(),
765 opts=None,
765 opts=None,
766 globbed=False,
766 globbed=False,
767 default=b'relpath',
767 default=b'relpath',
768 badfn=None,
768 badfn=None,
769 ):
769 ):
770 if opts is None:
770 if opts is None:
771 opts = {}
771 opts = {}
772 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
772 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
773 return composenormalfilematcher(match, manifest)
773 return composenormalfilematcher(match, manifest)
774
774
775 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
775 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
776 try:
776 try:
777 result = orig(ui, repo, pats, opts, rename)
777 result = orig(ui, repo, pats, opts, rename)
778 except error.Abort as e:
778 except error.Abort as e:
779 if e.message != _(b'no files to copy'):
779 if e.message != _(b'no files to copy'):
780 raise e
780 raise e
781 else:
781 else:
782 nonormalfiles = True
782 nonormalfiles = True
783 result = 0
783 result = 0
784
784
785 # The first rename can cause our current working directory to be removed.
785 # The first rename can cause our current working directory to be removed.
786 # In that case there is nothing left to copy/rename so just quit.
786 # In that case there is nothing left to copy/rename so just quit.
787 try:
787 try:
788 repo.getcwd()
788 repo.getcwd()
789 except OSError:
789 except OSError:
790 return result
790 return result
791
791
792 def makestandin(relpath):
792 def makestandin(relpath):
793 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
793 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
794 return repo.wvfs.join(lfutil.standin(path))
794 return repo.wvfs.join(lfutil.standin(path))
795
795
796 fullpats = scmutil.expandpats(pats)
796 fullpats = scmutil.expandpats(pats)
797 dest = fullpats[-1]
797 dest = fullpats[-1]
798
798
799 if os.path.isdir(dest):
799 if os.path.isdir(dest):
800 if not os.path.isdir(makestandin(dest)):
800 if not os.path.isdir(makestandin(dest)):
801 os.makedirs(makestandin(dest))
801 os.makedirs(makestandin(dest))
802
802
803 try:
803 try:
804 # When we call orig below it creates the standins but we don't add
804 # When we call orig below it creates the standins but we don't add
805 # them to the dir state until later so lock during that time.
805 # them to the dir state until later so lock during that time.
806 wlock = repo.wlock()
806 wlock = repo.wlock()
807
807
808 manifest = repo[None].manifest()
808 manifest = repo[None].manifest()
809
809
810 def overridematch(
810 def overridematch(
811 orig,
811 orig,
812 ctx,
812 ctx,
813 pats=(),
813 pats=(),
814 opts=None,
814 opts=None,
815 globbed=False,
815 globbed=False,
816 default=b'relpath',
816 default=b'relpath',
817 badfn=None,
817 badfn=None,
818 ):
818 ):
819 if opts is None:
819 if opts is None:
820 opts = {}
820 opts = {}
821 newpats = []
821 newpats = []
822 # The patterns were previously mangled to add the standin
822 # The patterns were previously mangled to add the standin
823 # directory; we need to remove that now
823 # directory; we need to remove that now
824 for pat in pats:
824 for pat in pats:
825 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
825 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
826 newpats.append(pat.replace(lfutil.shortname, b''))
826 newpats.append(pat.replace(lfutil.shortname, b''))
827 else:
827 else:
828 newpats.append(pat)
828 newpats.append(pat)
829 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
829 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
830 m = copy.copy(match)
830 m = copy.copy(match)
831 lfile = lambda f: lfutil.standin(f) in manifest
831 lfile = lambda f: lfutil.standin(f) in manifest
832 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
832 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
833 m._fileset = set(m._files)
833 m._fileset = set(m._files)
834 origmatchfn = m.matchfn
834 origmatchfn = m.matchfn
835
835
836 def matchfn(f):
836 def matchfn(f):
837 lfile = lfutil.splitstandin(f)
837 lfile = lfutil.splitstandin(f)
838 return (
838 return (
839 lfile is not None
839 lfile is not None
840 and (f in manifest)
840 and (f in manifest)
841 and origmatchfn(lfile)
841 and origmatchfn(lfile)
842 or None
842 or None
843 )
843 )
844
844
845 m.matchfn = matchfn
845 m.matchfn = matchfn
846 return m
846 return m
847
847
848 listpats = []
848 listpats = []
849 for pat in pats:
849 for pat in pats:
850 if matchmod.patkind(pat) is not None:
850 if matchmod.patkind(pat) is not None:
851 listpats.append(pat)
851 listpats.append(pat)
852 else:
852 else:
853 listpats.append(makestandin(pat))
853 listpats.append(makestandin(pat))
854
854
855 copiedfiles = []
855 copiedfiles = []
856
856
857 def overridecopyfile(orig, src, dest, *args, **kwargs):
857 def overridecopyfile(orig, src, dest, *args, **kwargs):
858 if lfutil.shortname in src and dest.startswith(
858 if lfutil.shortname in src and dest.startswith(
859 repo.wjoin(lfutil.shortname)
859 repo.wjoin(lfutil.shortname)
860 ):
860 ):
861 destlfile = dest.replace(lfutil.shortname, b'')
861 destlfile = dest.replace(lfutil.shortname, b'')
862 if not opts[b'force'] and os.path.exists(destlfile):
862 if not opts[b'force'] and os.path.exists(destlfile):
863 raise IOError(
863 raise IOError(
864 b'', _(b'destination largefile already exists')
864 b'', _(b'destination largefile already exists')
865 )
865 )
866 copiedfiles.append((src, dest))
866 copiedfiles.append((src, dest))
867 orig(src, dest, *args, **kwargs)
867 orig(src, dest, *args, **kwargs)
868
868
869 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
869 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
870 with extensions.wrappedfunction(scmutil, b'match', overridematch):
870 with extensions.wrappedfunction(scmutil, b'match', overridematch):
871 result += orig(ui, repo, listpats, opts, rename)
871 result += orig(ui, repo, listpats, opts, rename)
872
872
873 lfdirstate = lfutil.openlfdirstate(ui, repo)
873 lfdirstate = lfutil.openlfdirstate(ui, repo)
874 for (src, dest) in copiedfiles:
874 for (src, dest) in copiedfiles:
875 if lfutil.shortname in src and dest.startswith(
875 if lfutil.shortname in src and dest.startswith(
876 repo.wjoin(lfutil.shortname)
876 repo.wjoin(lfutil.shortname)
877 ):
877 ):
878 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
878 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
879 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
879 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
880 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
880 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
881 if not os.path.isdir(destlfiledir):
881 if not os.path.isdir(destlfiledir):
882 os.makedirs(destlfiledir)
882 os.makedirs(destlfiledir)
883 if rename:
883 if rename:
884 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
884 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
885
885
886 # The file is gone, but this deletes any empty parent
886 # The file is gone, but this deletes any empty parent
887 # directories as a side-effect.
887 # directories as a side-effect.
888 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
888 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
889 lfdirstate.set_untracked(srclfile)
889 lfdirstate.set_untracked(srclfile)
890 else:
890 else:
891 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
891 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
892
892
893 lfdirstate.set_tracked(destlfile)
893 lfdirstate.set_tracked(destlfile)
894 lfdirstate.write(repo.currenttransaction())
894 lfdirstate.write(repo.currenttransaction())
895 except error.Abort as e:
895 except error.Abort as e:
896 if e.message != _(b'no files to copy'):
896 if e.message != _(b'no files to copy'):
897 raise e
897 raise e
898 else:
898 else:
899 nolfiles = True
899 nolfiles = True
900 finally:
900 finally:
901 wlock.release()
901 wlock.release()
902
902
903 if nolfiles and nonormalfiles:
903 if nolfiles and nonormalfiles:
904 raise error.Abort(_(b'no files to copy'))
904 raise error.Abort(_(b'no files to copy'))
905
905
906 return result
906 return result
907
907
908
908
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock(), repo.dirstate.running_status(repo):
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write(repo.currenttransaction())
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate.get_entry(f).removed:
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )


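# Note (illustrative): the net effect of the wrapper above is that a command
# such as
#
#   hg revert -r REV some-largefile
#
# first reverts the standin under .hglf/ and then re-materializes the
# largefile content from it through lfcommands.updatelfiles().
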
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    revsprepull = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append(b'pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in logcmdutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result


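# Example (illustrative): both invocations below ask overridepull() to fetch
# largefile contents while pulling; the first, deprecated spelling is simply
# rewritten into the second one above.
#
#   hg pull --all-largefiles
#   hg pull --lfrev "pulled()"
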
@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        opargs = kwargs.setdefault('opargs', {})
        opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)


@eh.wrapfunction(exchange, b'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Override pushoperation constructor and store lfrevs parameter"""
    lfrevs = kwargs.pop('lfrevs', None)
    pushop = orig(*args, **kwargs)
    pushop.lfrevs = lfrevs
    return pushop


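# Example (illustrative): uploading the largefiles referenced by draft
# changesets while pushing; the revisions given to --lfrev travel through
# overridepush() into pushop.lfrevs, where the largefiles push code picks
# them up.
#
#   hg push --lfrev "draft()" default
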
@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that have just been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull --lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull --lfrev "head(pulled()) and not closed()"

    """

    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])


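# Sketch of how pulled() evaluates (illustrative numbers): if the repository
# had 1200 revisions when the pull started, overridepull() sets
# repo.firstpulled = 1200, so pulled() keeps exactly the newly pulled
# revisions, i.e. every rev r with r >= 1200.
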
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % d
        )

    return orig(ui, source, dest, **opts)


@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt the download whether or not this is a
        # largefile repo.
        if opts.get(b'all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo)

            if missing != 0:
                return None

    return result


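# Example (illustrative): prefetching every largefile revision while cloning;
# overrideclone() rejects the same flag when the destination is not local
# (for instance an ssh:// path).
#
#   hg clone --all-largefiles https://example.com/repo local-copy
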
@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()


@eh.extsetup
def overriderebase(ui):
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        pass
    else:

        def _dorebase(orig, *args, **kwargs):
            kwargs['inmemory'] = False
            return orig(*args, **kwargs)

        extensions.wrapfunction(rebase, b'_dorebase', _dorebase)


@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    with lfstatus(repo.unfiltered()):
        return orig(ui, repo.unfiltered(), dest, **opts)


@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    with lfstatus(web.repo):
        return orig(web)


@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive. That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()


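# Example (illustrative): because overridearchive() swaps each standin for the
# real largefile data, an export such as
#
#   hg archive -r tip ../project-tip.tar.gz
#
# contains the actual large file contents rather than the small .hglf/ hash
# files.
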
@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function. That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)


# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        s = repo.status()
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_(b'uncommitted changes'))


@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)


@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            lfdirstate.set_untracked(f)
        lfdirstate.write(repo.currenttransaction())
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot


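# Example (illustrative): forgetting a tracked largefile runs through
# cmdutilforget(), which untracks it in the largefiles dirstate and deletes
# the matching .hglf/ standin while leaving the file itself in the working
# directory.
#
#   hg forget big-dataset.bin
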
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pair of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]:  # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)


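# Sketch (illustrative) of the callback contract used by _getoutgoings():
# callers pass an addfunc taking (filename, largefile hash), for example
#
#   def addfunc(fn, lfhash):
#       toupload.add(fn)
#       lfhashes.add(lfhash)
#
# and only hashes missing from the remote store are reported to it.
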
def outgoinghook(ui, repo, other, opts, missing):
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}

            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []  # pytype: disable=unsupported-operands
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b'    %s\n' % lfhash)

        else:
            toupload = set()

            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)

            def showhashes(fn):
                pass

        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_(b'largefiles: no files to upload\n'))
        else:
            ui.status(
                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
            )
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + b'\n')
                showhashes(file)
            ui.status(b'\n')


@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    # Nothing to do here other than add the extra help option - the hook
    # above processes it.
    return orig(*args, **kwargs)


def summaryremotehook(ui, repo, opts, changes):
    largeopt = opts.get(b'large', False)
    if changes is None:
        if largeopt:
            return (False, True)  # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(
                _(b'largefiles: %d entities for %d files to upload\n')
                % (len(lfhashes), len(toupload))
            )


@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)


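# Example (illustrative): with the flag added above, `hg summary --large`
# appends one of the "largefiles: ..." lines produced by summaryremotehook(),
# e.g. "largefiles: 3 entities for 2 files to upload".
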
@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(
    orig,
    repo,
    matcher,
    prefix,
    uipathfn,
    opts=None,
    open_tr=None,
):
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)

    # open the transaction and changing_files context
    if open_tr is not None:
        open_tr()

    # Get the list of missing largefiles so we can remove them
    with repo.dirstate.running_status(repo):
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s, mtime_boundary = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=False,
            unknown=False,
        )

    # Call into the normal remove code, but leave the removal of the standins
    # themselves to the original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)

    return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)


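# Example (illustrative): on a largefiles-enabled repository,
#
#   hg addremove --dry-run
#
# first routes missing and new largefiles through removelargefiles() and
# addlargefiles() above, then falls through to the stock addremove for the
# remaining normal files.
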
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only look up attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a workaround we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [
            f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
        ]
        ignored = [
            f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
        ]
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus


@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    with repo.wlock():
        before = repo.dirstate.parents()
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate.get_entry(f).removed:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

    return result


@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result


@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    opts = pycompat.byteskwargs(opts)
    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err


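# Example (illustrative): `hg cat` on a largefile streams the real content,
# downloading it into the user cache first when necessary:
#
#   hg cat -r tip big-asset.iso > /tmp/big-asset.iso
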
@eh.wrapfunction(merge, b'_update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock(), repo.dirstate.changing_parents(repo):
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        with repo.dirstate.running_status(repo):
            lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
            unsure, s, mtime_boundary = lfdirstate.status(
                matchmod.always(),
                subrepos=[],
                ignored=False,
                clean=True,
                unknown=False,
            )
        oldclean = set(s.clean)
        pctx = repo[b'.']
        dctx = repo[node]
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(
                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
            )
            if standin in pctx and lfhash == lfutil.readasstandin(
                pctx[standin]
            ):
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            entry = lfdirstate.get_entry(lfile)
            lfdirstate.hacky_extension_update_file(
                lfile,
                wc_tracked=entry.tracked,
                p1_tracked=entry.p1_tracked,
                p2_info=entry.p2_info,
                possibly_dirty=True,
            )
        lfdirstate.write(repo.currenttransaction())

        oldstandins = lfutil.getstandinsstate(repo)
        wc = kwargs.get('wc')
        if wc and wc.isinmemory():
            # largefiles is not a good candidate for in-memory merge (large
            # files, custom dirstate, matcher usage).
            raise error.ProgrammingError(
                b'largefiles is not compatible with in-memory merge'
            )
        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

        return result


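# Note (illustrative summary of the wrapper above): before delegating to the
# real merge._update, mergeupdate() refreshes the standins of modified and
# added largefiles (computing their status under running_status), runs the
# update, and finally calls lfcommands.updatelfiles() so the working-copy
# largefiles match the new standins.
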
@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    result = orig(repo, files, *args, **kwargs)

    filelist = []
    for f in files:
        lf = lfutil.splitstandin(f)
        if lf is not None:
            filelist.append(lf)
    if filelist:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=filelist,
            printmessage=False,
            normallookup=True,
        )

    return result


@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if b'largefiles' in repo.requirements:
        reqs.add(b'largefiles')
    return reqs


_lfscheme = b'largefile://'


@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None, **kwargs):
    if url_.startswith(_lfscheme):
        if data:
            msg = b"cannot use data on a 'largefile://' url"
            raise error.ProgrammingError(msg)
        lfid = url_[len(_lfscheme) :]
        return storefactory.getlfile(ui, lfid)
    else:
        return orig(ui, url_, data=data, **kwargs)
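

# Example (illustrative): with the wrapper above in place, other code can open
# a largefile by its hash through the url layer, e.g.
#
#   fp = urlmod.open(ui, b'largefile://' + lfhash)
#
# where lfhash is the hex hash string stored in the standin.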