largefiles: rely on the higher level `changing_files` in `mergerecordupdates`...
marmoute - r50943:c3c8ac54 default
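The change itself is small and sits near the end of the diff below: `mergerecordupdates` stops opening its own `changing_parents` context on the largefiles dirstate and stops calling `lfdirstate.write()` directly, since, per the commit summary, the higher level `changing_files` context opened by the caller (and propagated to the largefiles dirstate by the wrapped `dirstate._changing` defined earlier in this same file) now takes care of that bookkeeping. The toy sketch below only illustrates that propagation pattern; `ToyDirstate` and its `changing_files` method are stand-ins invented for illustration, not the real Mercurial API.

import contextlib

class ToyDirstate:
    """Stand-in for a dirstate; only the context-propagation idea matters."""

    def __init__(self, name, sub=None):
        self.name = name
        self.sub = sub  # nested (largefiles) dirstate, if any

    @contextlib.contextmanager
    def changing_files(self):
        # Entering the context on the main dirstate also enters it on the
        # nested one, like the wrapped dirstate._changing in overrides.py.
        if self.sub is not None:
            with self.sub.changing_files():
                yield
        else:
            yield
        # a real dirstate would flush its pending changes at context exit
        print('%s: written at context exit' % self.name)

lfdirstate = ToyDirstate('lfdirstate')
dirstate = ToyDirstate('dirstate', sub=lfdirstate)

with dirstate.changing_files():
    # recordupdates-style bookkeeping would happen here; no explicit
    # lfdirstate.write() call is needed any more
    pass

When run, the sketch prints the lfdirstate flush first and the main dirstate flush second, showing the nested state being settled as part of the outer context rather than by an explicit write call.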
@@ -1,1903 +1,1897 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import contextlib
11 import contextlib
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18
18
19 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
20
20
21 from mercurial import (
21 from mercurial import (
22 archival,
22 archival,
23 cmdutil,
23 cmdutil,
24 copies as copiesmod,
24 copies as copiesmod,
25 dirstate,
25 dirstate,
26 error,
26 error,
27 exchange,
27 exchange,
28 extensions,
28 extensions,
29 exthelper,
29 exthelper,
30 filemerge,
30 filemerge,
31 hg,
31 hg,
32 logcmdutil,
32 logcmdutil,
33 match as matchmod,
33 match as matchmod,
34 merge,
34 merge,
35 mergestate as mergestatemod,
35 mergestate as mergestatemod,
36 pathutil,
36 pathutil,
37 pycompat,
37 pycompat,
38 scmutil,
38 scmutil,
39 smartset,
39 smartset,
40 subrepo,
40 subrepo,
41 url as urlmod,
41 url as urlmod,
42 util,
42 util,
43 )
43 )
44
44
45 from mercurial.upgrade_utils import (
45 from mercurial.upgrade_utils import (
46 actions as upgrade_actions,
46 actions as upgrade_actions,
47 )
47 )
48
48
49 from . import (
49 from . import (
50 lfcommands,
50 lfcommands,
51 lfutil,
51 lfutil,
52 storefactory,
52 storefactory,
53 )
53 )
54
54
55 ACTION_ADD = mergestatemod.ACTION_ADD
55 ACTION_ADD = mergestatemod.ACTION_ADD
56 ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
56 ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
57 ACTION_GET = mergestatemod.ACTION_GET
57 ACTION_GET = mergestatemod.ACTION_GET
58 ACTION_KEEP = mergestatemod.ACTION_KEEP
58 ACTION_KEEP = mergestatemod.ACTION_KEEP
59 ACTION_REMOVE = mergestatemod.ACTION_REMOVE
59 ACTION_REMOVE = mergestatemod.ACTION_REMOVE
60
60
61 eh = exthelper.exthelper()
61 eh = exthelper.exthelper()
62
62
63 lfstatus = lfutil.lfstatus
63 lfstatus = lfutil.lfstatus
64
64
65 MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')
65 MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')
66
66
67 # -- Utility functions: commonly/repeatedly needed functionality ---------------
67 # -- Utility functions: commonly/repeatedly needed functionality ---------------
68
68
69
69
70 def composelargefilematcher(match, manifest):
70 def composelargefilematcher(match, manifest):
71 """create a matcher that matches only the largefiles in the original
71 """create a matcher that matches only the largefiles in the original
72 matcher"""
72 matcher"""
73 m = copy.copy(match)
73 m = copy.copy(match)
74 lfile = lambda f: lfutil.standin(f) in manifest
74 lfile = lambda f: lfutil.standin(f) in manifest
75 m._files = [lf for lf in m._files if lfile(lf)]
75 m._files = [lf for lf in m._files if lfile(lf)]
76 m._fileset = set(m._files)
76 m._fileset = set(m._files)
77 m.always = lambda: False
77 m.always = lambda: False
78 origmatchfn = m.matchfn
78 origmatchfn = m.matchfn
79 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
79 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
80 return m
80 return m
81
81
82
82
83 def composenormalfilematcher(match, manifest, exclude=None):
83 def composenormalfilematcher(match, manifest, exclude=None):
84 excluded = set()
84 excluded = set()
85 if exclude is not None:
85 if exclude is not None:
86 excluded.update(exclude)
86 excluded.update(exclude)
87
87
88 m = copy.copy(match)
88 m = copy.copy(match)
89 notlfile = lambda f: not (
89 notlfile = lambda f: not (
90 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
90 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
91 )
91 )
92 m._files = [lf for lf in m._files if notlfile(lf)]
92 m._files = [lf for lf in m._files if notlfile(lf)]
93 m._fileset = set(m._files)
93 m._fileset = set(m._files)
94 m.always = lambda: False
94 m.always = lambda: False
95 origmatchfn = m.matchfn
95 origmatchfn = m.matchfn
96 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
96 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
97 return m
97 return m
98
98
99
99
100 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
100 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
101 large = opts.get('large')
101 large = opts.get('large')
102 lfsize = lfutil.getminsize(
102 lfsize = lfutil.getminsize(
103 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
103 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
104 )
104 )
105
105
106 lfmatcher = None
106 lfmatcher = None
107 if lfutil.islfilesrepo(repo):
107 if lfutil.islfilesrepo(repo):
108 lfpats = ui.configlist(lfutil.longname, b'patterns')
108 lfpats = ui.configlist(lfutil.longname, b'patterns')
109 if lfpats:
109 if lfpats:
110 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
110 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
111
111
112 lfnames = []
112 lfnames = []
113 m = matcher
113 m = matcher
114
114
115 wctx = repo[None]
115 wctx = repo[None]
116 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
116 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
117 exact = m.exact(f)
117 exact = m.exact(f)
118 lfile = lfutil.standin(f) in wctx
118 lfile = lfutil.standin(f) in wctx
119 nfile = f in wctx
119 nfile = f in wctx
120 exists = lfile or nfile
120 exists = lfile or nfile
121
121
122 # Don't warn the user when they attempt to add a normal tracked file.
122 # Don't warn the user when they attempt to add a normal tracked file.
123 # The normal add code will do that for us.
123 # The normal add code will do that for us.
124 if exact and exists:
124 if exact and exists:
125 if lfile:
125 if lfile:
126 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
126 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
127 continue
127 continue
128
128
129 if (exact or not exists) and not lfutil.isstandin(f):
129 if (exact or not exists) and not lfutil.isstandin(f):
130 # In case the file was removed previously, but not committed
130 # In case the file was removed previously, but not committed
131 # (issue3507)
131 # (issue3507)
132 if not repo.wvfs.exists(f):
132 if not repo.wvfs.exists(f):
133 continue
133 continue
134
134
135 abovemin = (
135 abovemin = (
136 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
136 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
137 )
137 )
138 if large or abovemin or (lfmatcher and lfmatcher(f)):
138 if large or abovemin or (lfmatcher and lfmatcher(f)):
139 lfnames.append(f)
139 lfnames.append(f)
140 if ui.verbose or not exact:
140 if ui.verbose or not exact:
141 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
141 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
142
142
143 bad = []
143 bad = []
144
144
145 # Need to lock, otherwise there could be a race condition between
145 # Need to lock, otherwise there could be a race condition between
146 # when standins are created and added to the repo.
146 # when standins are created and added to the repo.
147 with repo.wlock():
147 with repo.wlock():
148 if not opts.get('dry_run'):
148 if not opts.get('dry_run'):
149 standins = []
149 standins = []
150 lfdirstate = lfutil.openlfdirstate(ui, repo)
150 lfdirstate = lfutil.openlfdirstate(ui, repo)
151 for f in lfnames:
151 for f in lfnames:
152 standinname = lfutil.standin(f)
152 standinname = lfutil.standin(f)
153 lfutil.writestandin(
153 lfutil.writestandin(
154 repo,
154 repo,
155 standinname,
155 standinname,
156 hash=b'',
156 hash=b'',
157 executable=lfutil.getexecutable(repo.wjoin(f)),
157 executable=lfutil.getexecutable(repo.wjoin(f)),
158 )
158 )
159 standins.append(standinname)
159 standins.append(standinname)
160 lfdirstate.set_tracked(f)
160 lfdirstate.set_tracked(f)
161 lfdirstate.write(repo.currenttransaction())
161 lfdirstate.write(repo.currenttransaction())
162 bad += [
162 bad += [
163 lfutil.splitstandin(f)
163 lfutil.splitstandin(f)
164 for f in repo[None].add(standins)
164 for f in repo[None].add(standins)
165 if f in m.files()
165 if f in m.files()
166 ]
166 ]
167
167
168 added = [f for f in lfnames if f not in bad]
168 added = [f for f in lfnames if f not in bad]
169 return added, bad
169 return added, bad
170
170
171
171
172 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
172 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
173 after = opts.get('after')
173 after = opts.get('after')
174 m = composelargefilematcher(matcher, repo[None].manifest())
174 m = composelargefilematcher(matcher, repo[None].manifest())
175 with lfstatus(repo):
175 with lfstatus(repo):
176 s = repo.status(match=m, clean=not isaddremove)
176 s = repo.status(match=m, clean=not isaddremove)
177 manifest = repo[None].manifest()
177 manifest = repo[None].manifest()
178 modified, added, deleted, clean = [
178 modified, added, deleted, clean = [
179 [f for f in list if lfutil.standin(f) in manifest]
179 [f for f in list if lfutil.standin(f) in manifest]
180 for list in (s.modified, s.added, s.deleted, s.clean)
180 for list in (s.modified, s.added, s.deleted, s.clean)
181 ]
181 ]
182
182
183 def warn(files, msg):
183 def warn(files, msg):
184 for f in files:
184 for f in files:
185 ui.warn(msg % uipathfn(f))
185 ui.warn(msg % uipathfn(f))
186 return int(len(files) > 0)
186 return int(len(files) > 0)
187
187
188 if after:
188 if after:
189 remove = deleted
189 remove = deleted
190 result = warn(
190 result = warn(
191 modified + added + clean, _(b'not removing %s: file still exists\n')
191 modified + added + clean, _(b'not removing %s: file still exists\n')
192 )
192 )
193 else:
193 else:
194 remove = deleted + clean
194 remove = deleted + clean
195 result = warn(
195 result = warn(
196 modified,
196 modified,
197 _(
197 _(
198 b'not removing %s: file is modified (use -f'
198 b'not removing %s: file is modified (use -f'
199 b' to force removal)\n'
199 b' to force removal)\n'
200 ),
200 ),
201 )
201 )
202 result = (
202 result = (
203 warn(
203 warn(
204 added,
204 added,
205 _(
205 _(
206 b'not removing %s: file has been marked for add'
206 b'not removing %s: file has been marked for add'
207 b' (use forget to undo)\n'
207 b' (use forget to undo)\n'
208 ),
208 ),
209 )
209 )
210 or result
210 or result
211 )
211 )
212
212
213 # Need to lock because standin files are deleted then removed from the
213 # Need to lock because standin files are deleted then removed from the
214 # repository and we could race in-between.
214 # repository and we could race in-between.
215 with repo.wlock():
215 with repo.wlock():
216 lfdirstate = lfutil.openlfdirstate(ui, repo)
216 lfdirstate = lfutil.openlfdirstate(ui, repo)
217 for f in sorted(remove):
217 for f in sorted(remove):
218 if ui.verbose or not m.exact(f):
218 if ui.verbose or not m.exact(f):
219 ui.status(_(b'removing %s\n') % uipathfn(f))
219 ui.status(_(b'removing %s\n') % uipathfn(f))
220
220
221 if not dryrun:
221 if not dryrun:
222 if not after:
222 if not after:
223 repo.wvfs.unlinkpath(f, ignoremissing=True)
223 repo.wvfs.unlinkpath(f, ignoremissing=True)
224
224
225 if dryrun:
225 if dryrun:
226 return result
226 return result
227
227
228 remove = [lfutil.standin(f) for f in remove]
228 remove = [lfutil.standin(f) for f in remove]
229 # If this is being called by addremove, let the original addremove
229 # If this is being called by addremove, let the original addremove
230 # function handle this.
230 # function handle this.
231 if not isaddremove:
231 if not isaddremove:
232 for f in remove:
232 for f in remove:
233 repo.wvfs.unlinkpath(f, ignoremissing=True)
233 repo.wvfs.unlinkpath(f, ignoremissing=True)
234 repo[None].forget(remove)
234 repo[None].forget(remove)
235
235
236 for f in remove:
236 for f in remove:
237 lfdirstate.set_untracked(lfutil.splitstandin(f))
237 lfdirstate.set_untracked(lfutil.splitstandin(f))
238
238
239 lfdirstate.write(repo.currenttransaction())
239 lfdirstate.write(repo.currenttransaction())
240
240
241 return result
241 return result
242
242
243
243
244 # For overriding mercurial.hgweb.webcommands so that largefiles will
244 # For overriding mercurial.hgweb.webcommands so that largefiles will
245 # appear at their right place in the manifests.
245 # appear at their right place in the manifests.
246 @eh.wrapfunction(webcommands, b'decodepath')
246 @eh.wrapfunction(webcommands, b'decodepath')
247 def decodepath(orig, path):
247 def decodepath(orig, path):
248 return lfutil.splitstandin(path) or path
248 return lfutil.splitstandin(path) or path
249
249
250
250
251 # -- Wrappers: modify existing commands --------------------------------
251 # -- Wrappers: modify existing commands --------------------------------
252
252
253
253
254 @eh.wrapcommand(
254 @eh.wrapcommand(
255 b'add',
255 b'add',
256 opts=[
256 opts=[
257 (b'', b'large', None, _(b'add as largefile')),
257 (b'', b'large', None, _(b'add as largefile')),
258 (b'', b'normal', None, _(b'add as normal file')),
258 (b'', b'normal', None, _(b'add as normal file')),
259 (
259 (
260 b'',
260 b'',
261 b'lfsize',
261 b'lfsize',
262 b'',
262 b'',
263 _(
263 _(
264 b'add all files above this size (in megabytes) '
264 b'add all files above this size (in megabytes) '
265 b'as largefiles (default: 10)'
265 b'as largefiles (default: 10)'
266 ),
266 ),
267 ),
267 ),
268 ],
268 ],
269 )
269 )
270 def overrideadd(orig, ui, repo, *pats, **opts):
270 def overrideadd(orig, ui, repo, *pats, **opts):
271 if opts.get('normal') and opts.get('large'):
271 if opts.get('normal') and opts.get('large'):
272 raise error.Abort(_(b'--normal cannot be used with --large'))
272 raise error.Abort(_(b'--normal cannot be used with --large'))
273 return orig(ui, repo, *pats, **opts)
273 return orig(ui, repo, *pats, **opts)
274
274
275
275
276 @eh.wrapfunction(cmdutil, b'add')
276 @eh.wrapfunction(cmdutil, b'add')
277 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
277 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
278 # The --normal flag short circuits this override
278 # The --normal flag short circuits this override
279 if opts.get('normal'):
279 if opts.get('normal'):
280 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
280 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
281
281
282 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
282 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
283 normalmatcher = composenormalfilematcher(
283 normalmatcher = composenormalfilematcher(
284 matcher, repo[None].manifest(), ladded
284 matcher, repo[None].manifest(), ladded
285 )
285 )
286 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
286 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
287
287
288 bad.extend(f for f in lbad)
288 bad.extend(f for f in lbad)
289 return bad
289 return bad
290
290
291
291
292 @eh.wrapfunction(cmdutil, b'remove')
292 @eh.wrapfunction(cmdutil, b'remove')
293 def cmdutilremove(
293 def cmdutilremove(
294 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
294 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
295 ):
295 ):
296 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
296 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
297 result = orig(
297 result = orig(
298 ui,
298 ui,
299 repo,
299 repo,
300 normalmatcher,
300 normalmatcher,
301 prefix,
301 prefix,
302 uipathfn,
302 uipathfn,
303 after,
303 after,
304 force,
304 force,
305 subrepos,
305 subrepos,
306 dryrun,
306 dryrun,
307 )
307 )
308 return (
308 return (
309 removelargefiles(
309 removelargefiles(
310 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
310 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
311 )
311 )
312 or result
312 or result
313 )
313 )
314
314
315
315
316 @eh.wrapfunction(dirstate.dirstate, b'_changing')
316 @eh.wrapfunction(dirstate.dirstate, b'_changing')
317 @contextlib.contextmanager
317 @contextlib.contextmanager
318 def _changing(orig, self, repo, change_type):
318 def _changing(orig, self, repo, change_type):
319 pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
319 pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
320 try:
320 try:
321 lfd = getattr(self, '_large_file_dirstate', False)
321 lfd = getattr(self, '_large_file_dirstate', False)
322 if sub_dirstate is None and not lfd:
322 if sub_dirstate is None and not lfd:
323 sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
323 sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
324 self._sub_dirstate = sub_dirstate
324 self._sub_dirstate = sub_dirstate
325 if not lfd:
325 if not lfd:
326 assert self._sub_dirstate is not None
326 assert self._sub_dirstate is not None
327 with orig(self, repo, change_type):
327 with orig(self, repo, change_type):
328 if sub_dirstate is None:
328 if sub_dirstate is None:
329 yield
329 yield
330 else:
330 else:
331 with sub_dirstate._changing(repo, change_type):
331 with sub_dirstate._changing(repo, change_type):
332 yield
332 yield
333 finally:
333 finally:
334 self._sub_dirstate = pre
334 self._sub_dirstate = pre
335
335
336
336
337 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
337 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
338 def overridestatusfn(orig, repo, rev2, **opts):
338 def overridestatusfn(orig, repo, rev2, **opts):
339 with lfstatus(repo._repo):
339 with lfstatus(repo._repo):
340 return orig(repo, rev2, **opts)
340 return orig(repo, rev2, **opts)
341
341
342
342
343 @eh.wrapcommand(b'status')
343 @eh.wrapcommand(b'status')
344 def overridestatus(orig, ui, repo, *pats, **opts):
344 def overridestatus(orig, ui, repo, *pats, **opts):
345 with lfstatus(repo):
345 with lfstatus(repo):
346 return orig(ui, repo, *pats, **opts)
346 return orig(ui, repo, *pats, **opts)
347
347
348
348
349 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
349 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
350 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
350 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
351 with lfstatus(repo._repo):
351 with lfstatus(repo._repo):
352 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
352 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
353
353
354
354
355 @eh.wrapcommand(b'log')
355 @eh.wrapcommand(b'log')
356 def overridelog(orig, ui, repo, *pats, **opts):
356 def overridelog(orig, ui, repo, *pats, **opts):
357 def overridematchandpats(
357 def overridematchandpats(
358 orig,
358 orig,
359 ctx,
359 ctx,
360 pats=(),
360 pats=(),
361 opts=None,
361 opts=None,
362 globbed=False,
362 globbed=False,
363 default=b'relpath',
363 default=b'relpath',
364 badfn=None,
364 badfn=None,
365 ):
365 ):
366 """Matcher that merges root directory with .hglf, suitable for log.
366 """Matcher that merges root directory with .hglf, suitable for log.
367 It is still possible to match .hglf directly.
367 It is still possible to match .hglf directly.
368 For any listed files run log on the standin too.
368 For any listed files run log on the standin too.
369 matchfn tries both the given filename and with .hglf stripped.
369 matchfn tries both the given filename and with .hglf stripped.
370 """
370 """
371 if opts is None:
371 if opts is None:
372 opts = {}
372 opts = {}
373 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
373 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
374 m, p = copy.copy(matchandpats)
374 m, p = copy.copy(matchandpats)
375
375
376 if m.always():
376 if m.always():
377 # We want to match everything anyway, so there's no benefit trying
377 # We want to match everything anyway, so there's no benefit trying
378 # to add standins.
378 # to add standins.
379 return matchandpats
379 return matchandpats
380
380
381 pats = set(p)
381 pats = set(p)
382
382
383 def fixpats(pat, tostandin=lfutil.standin):
383 def fixpats(pat, tostandin=lfutil.standin):
384 if pat.startswith(b'set:'):
384 if pat.startswith(b'set:'):
385 return pat
385 return pat
386
386
387 kindpat = matchmod._patsplit(pat, None)
387 kindpat = matchmod._patsplit(pat, None)
388
388
389 if kindpat[0] is not None:
389 if kindpat[0] is not None:
390 return kindpat[0] + b':' + tostandin(kindpat[1])
390 return kindpat[0] + b':' + tostandin(kindpat[1])
391 return tostandin(kindpat[1])
391 return tostandin(kindpat[1])
392
392
393 cwd = repo.getcwd()
393 cwd = repo.getcwd()
394 if cwd:
394 if cwd:
395 hglf = lfutil.shortname
395 hglf = lfutil.shortname
396 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
396 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
397
397
398 def tostandin(f):
398 def tostandin(f):
399 # The file may already be a standin, so truncate the back
399 # The file may already be a standin, so truncate the back
400 # prefix and test before mangling it. This avoids turning
400 # prefix and test before mangling it. This avoids turning
401 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
401 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
402 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
402 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
403 return f
403 return f
404
404
405 # An absolute path is from outside the repo, so truncate the
405 # An absolute path is from outside the repo, so truncate the
406 # path to the root before building the standin. Otherwise cwd
406 # path to the root before building the standin. Otherwise cwd
407 # is somewhere in the repo, relative to root, and needs to be
407 # is somewhere in the repo, relative to root, and needs to be
408 # prepended before building the standin.
408 # prepended before building the standin.
409 if os.path.isabs(cwd):
409 if os.path.isabs(cwd):
410 f = f[len(back) :]
410 f = f[len(back) :]
411 else:
411 else:
412 f = cwd + b'/' + f
412 f = cwd + b'/' + f
413 return back + lfutil.standin(f)
413 return back + lfutil.standin(f)
414
414
415 else:
415 else:
416
416
417 def tostandin(f):
417 def tostandin(f):
418 if lfutil.isstandin(f):
418 if lfutil.isstandin(f):
419 return f
419 return f
420 return lfutil.standin(f)
420 return lfutil.standin(f)
421
421
422 pats.update(fixpats(f, tostandin) for f in p)
422 pats.update(fixpats(f, tostandin) for f in p)
423
423
424 for i in range(0, len(m._files)):
424 for i in range(0, len(m._files)):
425 # Don't add '.hglf' to m.files, since that is already covered by '.'
425 # Don't add '.hglf' to m.files, since that is already covered by '.'
426 if m._files[i] == b'.':
426 if m._files[i] == b'.':
427 continue
427 continue
428 standin = lfutil.standin(m._files[i])
428 standin = lfutil.standin(m._files[i])
429 # If the "standin" is a directory, append instead of replace to
429 # If the "standin" is a directory, append instead of replace to
430 # support naming a directory on the command line with only
430 # support naming a directory on the command line with only
431 # largefiles. The original directory is kept to support normal
431 # largefiles. The original directory is kept to support normal
432 # files.
432 # files.
433 if standin in ctx:
433 if standin in ctx:
434 m._files[i] = standin
434 m._files[i] = standin
435 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
435 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
436 m._files.append(standin)
436 m._files.append(standin)
437
437
438 m._fileset = set(m._files)
438 m._fileset = set(m._files)
439 m.always = lambda: False
439 m.always = lambda: False
440 origmatchfn = m.matchfn
440 origmatchfn = m.matchfn
441
441
442 def lfmatchfn(f):
442 def lfmatchfn(f):
443 lf = lfutil.splitstandin(f)
443 lf = lfutil.splitstandin(f)
444 if lf is not None and origmatchfn(lf):
444 if lf is not None and origmatchfn(lf):
445 return True
445 return True
446 r = origmatchfn(f)
446 r = origmatchfn(f)
447 return r
447 return r
448
448
449 m.matchfn = lfmatchfn
449 m.matchfn = lfmatchfn
450
450
451 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
451 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
452 return m, pats
452 return m, pats
453
453
454 # For hg log --patch, the match object is used in two different senses:
454 # For hg log --patch, the match object is used in two different senses:
455 # (1) to determine what revisions should be printed out, and
455 # (1) to determine what revisions should be printed out, and
456 # (2) to determine what files to print out diffs for.
456 # (2) to determine what files to print out diffs for.
457 # The magic matchandpats override should be used for case (1) but not for
457 # The magic matchandpats override should be used for case (1) but not for
458 # case (2).
458 # case (2).
459 oldmatchandpats = scmutil.matchandpats
459 oldmatchandpats = scmutil.matchandpats
460
460
461 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
461 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
462 wctx = repo[None]
462 wctx = repo[None]
463 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
463 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
464 return lambda ctx: match
464 return lambda ctx: match
465
465
466 wrappedmatchandpats = extensions.wrappedfunction(
466 wrappedmatchandpats = extensions.wrappedfunction(
467 scmutil, b'matchandpats', overridematchandpats
467 scmutil, b'matchandpats', overridematchandpats
468 )
468 )
469 wrappedmakefilematcher = extensions.wrappedfunction(
469 wrappedmakefilematcher = extensions.wrappedfunction(
470 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
470 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
471 )
471 )
472 with wrappedmatchandpats, wrappedmakefilematcher:
472 with wrappedmatchandpats, wrappedmakefilematcher:
473 return orig(ui, repo, *pats, **opts)
473 return orig(ui, repo, *pats, **opts)
474
474
475
475
476 @eh.wrapcommand(
476 @eh.wrapcommand(
477 b'verify',
477 b'verify',
478 opts=[
478 opts=[
479 (
479 (
480 b'',
480 b'',
481 b'large',
481 b'large',
482 None,
482 None,
483 _(b'verify that all largefiles in current revision exists'),
483 _(b'verify that all largefiles in current revision exists'),
484 ),
484 ),
485 (
485 (
486 b'',
486 b'',
487 b'lfa',
487 b'lfa',
488 None,
488 None,
489 _(b'verify largefiles in all revisions, not just current'),
489 _(b'verify largefiles in all revisions, not just current'),
490 ),
490 ),
491 (
491 (
492 b'',
492 b'',
493 b'lfc',
493 b'lfc',
494 None,
494 None,
495 _(b'verify local largefile contents, not just existence'),
495 _(b'verify local largefile contents, not just existence'),
496 ),
496 ),
497 ],
497 ],
498 )
498 )
499 def overrideverify(orig, ui, repo, *pats, **opts):
499 def overrideverify(orig, ui, repo, *pats, **opts):
500 large = opts.pop('large', False)
500 large = opts.pop('large', False)
501 all = opts.pop('lfa', False)
501 all = opts.pop('lfa', False)
502 contents = opts.pop('lfc', False)
502 contents = opts.pop('lfc', False)
503
503
504 result = orig(ui, repo, *pats, **opts)
504 result = orig(ui, repo, *pats, **opts)
505 if large or all or contents:
505 if large or all or contents:
506 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
506 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
507 return result
507 return result
508
508
509
509
510 @eh.wrapcommand(
510 @eh.wrapcommand(
511 b'debugstate',
511 b'debugstate',
512 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
512 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
513 )
513 )
514 def overridedebugstate(orig, ui, repo, *pats, **opts):
514 def overridedebugstate(orig, ui, repo, *pats, **opts):
515 large = opts.pop('large', False)
515 large = opts.pop('large', False)
516 if large:
516 if large:
517
517
518 class fakerepo:
518 class fakerepo:
519 dirstate = lfutil.openlfdirstate(ui, repo)
519 dirstate = lfutil.openlfdirstate(ui, repo)
520
520
521 orig(ui, fakerepo, *pats, **opts)
521 orig(ui, fakerepo, *pats, **opts)
522 else:
522 else:
523 orig(ui, repo, *pats, **opts)
523 orig(ui, repo, *pats, **opts)
524
524
525
525
526 # Before starting the manifest merge, merge.updates will call
526 # Before starting the manifest merge, merge.updates will call
527 # _checkunknownfile to check if there are any files in the merged-in
527 # _checkunknownfile to check if there are any files in the merged-in
528 # changeset that collide with unknown files in the working copy.
528 # changeset that collide with unknown files in the working copy.
529 #
529 #
530 # The largefiles are seen as unknown, so this prevents us from merging
530 # The largefiles are seen as unknown, so this prevents us from merging
531 # in a file 'foo' if we already have a largefile with the same name.
531 # in a file 'foo' if we already have a largefile with the same name.
532 #
532 #
533 # The overridden function filters the unknown files by removing any
533 # The overridden function filters the unknown files by removing any
534 # largefiles. This makes the merge proceed and we can then handle this
534 # largefiles. This makes the merge proceed and we can then handle this
535 # case further in the overridden calculateupdates function below.
535 # case further in the overridden calculateupdates function below.
536 @eh.wrapfunction(merge, b'_checkunknownfile')
536 @eh.wrapfunction(merge, b'_checkunknownfile')
537 def overridecheckunknownfile(
537 def overridecheckunknownfile(
538 origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
538 origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
539 ):
539 ):
540 if lfutil.standin(dirstate.normalize(f)) in wctx:
540 if lfutil.standin(dirstate.normalize(f)) in wctx:
541 return False
541 return False
542 return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)
542 return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)
543
543
544
544
545 # The manifest merge handles conflicts on the manifest level. We want
545 # The manifest merge handles conflicts on the manifest level. We want
546 # to handle changes in largefile-ness of files at this level too.
546 # to handle changes in largefile-ness of files at this level too.
547 #
547 #
548 # The strategy is to run the original calculateupdates and then process
548 # The strategy is to run the original calculateupdates and then process
549 # the action list it outputs. There are two cases we need to deal with:
549 # the action list it outputs. There are two cases we need to deal with:
550 #
550 #
551 # 1. Normal file in p1, largefile in p2. Here the largefile is
551 # 1. Normal file in p1, largefile in p2. Here the largefile is
552 # detected via its standin file, which will enter the working copy
552 # detected via its standin file, which will enter the working copy
553 # with a "get" action. It is not "merge" since the standin is all
553 # with a "get" action. It is not "merge" since the standin is all
554 # Mercurial is concerned with at this level -- the link to the
554 # Mercurial is concerned with at this level -- the link to the
555 # existing normal file is not relevant here.
555 # existing normal file is not relevant here.
556 #
556 #
557 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
557 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
558 # since the largefile will be present in the working copy and
558 # since the largefile will be present in the working copy and
559 # different from the normal file in p2. Mercurial therefore
559 # different from the normal file in p2. Mercurial therefore
560 # triggers a merge action.
560 # triggers a merge action.
561 #
561 #
562 # In both cases, we prompt the user and emit new actions to either
562 # In both cases, we prompt the user and emit new actions to either
563 # remove the standin (if the normal file was kept) or to remove the
563 # remove the standin (if the normal file was kept) or to remove the
564 # normal file and get the standin (if the largefile was kept). The
564 # normal file and get the standin (if the largefile was kept). The
565 # default prompt answer is to use the largefile version since it was
565 # default prompt answer is to use the largefile version since it was
566 # presumably changed on purpose.
566 # presumably changed on purpose.
567 #
567 #
568 # Finally, the merge.applyupdates function will then take care of
568 # Finally, the merge.applyupdates function will then take care of
569 # writing the files into the working copy and lfcommands.updatelfiles
569 # writing the files into the working copy and lfcommands.updatelfiles
570 # will update the largefiles.
570 # will update the largefiles.
571 @eh.wrapfunction(merge, b'calculateupdates')
571 @eh.wrapfunction(merge, b'calculateupdates')
572 def overridecalculateupdates(
572 def overridecalculateupdates(
573 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
573 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
574 ):
574 ):
575 overwrite = force and not branchmerge
575 overwrite = force and not branchmerge
576 mresult = origfn(
576 mresult = origfn(
577 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
577 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
578 )
578 )
579
579
580 if overwrite:
580 if overwrite:
581 return mresult
581 return mresult
582
582
583 # Convert to dictionary with filename as key and action as value.
583 # Convert to dictionary with filename as key and action as value.
584 lfiles = set()
584 lfiles = set()
585 for f in mresult.files():
585 for f in mresult.files():
586 splitstandin = lfutil.splitstandin(f)
586 splitstandin = lfutil.splitstandin(f)
587 if splitstandin is not None and splitstandin in p1:
587 if splitstandin is not None and splitstandin in p1:
588 lfiles.add(splitstandin)
588 lfiles.add(splitstandin)
589 elif lfutil.standin(f) in p1:
589 elif lfutil.standin(f) in p1:
590 lfiles.add(f)
590 lfiles.add(f)
591
591
592 for lfile in sorted(lfiles):
592 for lfile in sorted(lfiles):
593 standin = lfutil.standin(lfile)
593 standin = lfutil.standin(lfile)
594 (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
594 (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
595 (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
595 (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
596
596
597 if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
597 if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
598 if sm == ACTION_DELETED_CHANGED:
598 if sm == ACTION_DELETED_CHANGED:
599 f1, f2, fa, move, anc = sargs
599 f1, f2, fa, move, anc = sargs
600 sargs = (p2[f2].flags(), False)
600 sargs = (p2[f2].flags(), False)
601 # Case 1: normal file in the working copy, largefile in
601 # Case 1: normal file in the working copy, largefile in
602 # the second parent
602 # the second parent
603 usermsg = (
603 usermsg = (
604 _(
604 _(
605 b'remote turned local normal file %s into a largefile\n'
605 b'remote turned local normal file %s into a largefile\n'
606 b'use (l)argefile or keep (n)ormal file?'
606 b'use (l)argefile or keep (n)ormal file?'
607 b'$$ &Largefile $$ &Normal file'
607 b'$$ &Largefile $$ &Normal file'
608 )
608 )
609 % lfile
609 % lfile
610 )
610 )
611 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
611 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
612 mresult.addfile(
612 mresult.addfile(
613 lfile, ACTION_REMOVE, None, b'replaced by standin'
613 lfile, ACTION_REMOVE, None, b'replaced by standin'
614 )
614 )
615 mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
615 mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
616 else: # keep local normal file
616 else: # keep local normal file
617 mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
617 mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
618 if branchmerge:
618 if branchmerge:
619 mresult.addfile(
619 mresult.addfile(
620 standin,
620 standin,
621 ACTION_KEEP,
621 ACTION_KEEP,
622 None,
622 None,
623 b'replaced by non-standin',
623 b'replaced by non-standin',
624 )
624 )
625 else:
625 else:
626 mresult.addfile(
626 mresult.addfile(
627 standin,
627 standin,
628 ACTION_REMOVE,
628 ACTION_REMOVE,
629 None,
629 None,
630 b'replaced by non-standin',
630 b'replaced by non-standin',
631 )
631 )
632 if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
632 if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
633 if lm == ACTION_DELETED_CHANGED:
633 if lm == ACTION_DELETED_CHANGED:
634 f1, f2, fa, move, anc = largs
634 f1, f2, fa, move, anc = largs
635 largs = (p2[f2].flags(), False)
635 largs = (p2[f2].flags(), False)
636 # Case 2: largefile in the working copy, normal file in
636 # Case 2: largefile in the working copy, normal file in
637 # the second parent
637 # the second parent
638 usermsg = (
638 usermsg = (
639 _(
639 _(
640 b'remote turned local largefile %s into a normal file\n'
640 b'remote turned local largefile %s into a normal file\n'
641 b'keep (l)argefile or use (n)ormal file?'
641 b'keep (l)argefile or use (n)ormal file?'
642 b'$$ &Largefile $$ &Normal file'
642 b'$$ &Largefile $$ &Normal file'
643 )
643 )
644 % lfile
644 % lfile
645 )
645 )
646 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
646 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
647 if branchmerge:
647 if branchmerge:
648 # largefile can be restored from standin safely
648 # largefile can be restored from standin safely
649 mresult.addfile(
649 mresult.addfile(
650 lfile,
650 lfile,
651 ACTION_KEEP,
651 ACTION_KEEP,
652 None,
652 None,
653 b'replaced by standin',
653 b'replaced by standin',
654 )
654 )
655 mresult.addfile(
655 mresult.addfile(
656 standin, ACTION_KEEP, None, b'replaces standin'
656 standin, ACTION_KEEP, None, b'replaces standin'
657 )
657 )
658 else:
658 else:
659 # "lfile" should be marked as "removed" without
659 # "lfile" should be marked as "removed" without
660 # removal of itself
660 # removal of itself
661 mresult.addfile(
661 mresult.addfile(
662 lfile,
662 lfile,
663 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
663 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
664 None,
664 None,
665 b'forget non-standin largefile',
665 b'forget non-standin largefile',
666 )
666 )
667
667
668 # linear-merge should treat this largefile as 're-added'
668 # linear-merge should treat this largefile as 're-added'
669 mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
669 mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
670 else: # pick remote normal file
670 else: # pick remote normal file
671 mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
671 mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
672 mresult.addfile(
672 mresult.addfile(
673 standin,
673 standin,
674 ACTION_REMOVE,
674 ACTION_REMOVE,
675 None,
675 None,
676 b'replaced by non-standin',
676 b'replaced by non-standin',
677 )
677 )
678
678
679 return mresult
679 return mresult
680
680
681
681
682 @eh.wrapfunction(mergestatemod, b'recordupdates')
682 @eh.wrapfunction(mergestatemod, b'recordupdates')
683 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
683 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
684 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
684 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
685 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
685 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
- 686 with lfdirstate.changing_parents(repo):
- 687 for lfile, args, msg in actions[
- 688 MERGE_ACTION_LARGEFILE_MARK_REMOVED
- 689 ]:
- 690 # this should be executed before 'orig', to execute 'remove'
- 691 # before all other actions
- 692 repo.dirstate.update_file(
- 693 lfile, p1_tracked=True, wc_tracked=False
- 694 )
- 695 # make sure lfile doesn't get synclfdirstate'd as normal
- 696 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
- 697 lfdirstate.write(repo.currenttransaction())
- 698
+ 686 for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
+ 687 # this should be executed before 'orig', to execute 'remove'
+ 688 # before all other actions
+ 689 repo.dirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
+ 690 # make sure lfile doesn't get synclfdirstate'd as normal
+ 691 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
+ 692
699 return orig(repo, actions, branchmerge, getfiledata)
693 return orig(repo, actions, branchmerge, getfiledata)
700
694
701
695
702 # Override filemerge to prompt the user about how they wish to merge
696 # Override filemerge to prompt the user about how they wish to merge
703 # largefiles. This will handle identical edits without prompting the user.
697 # largefiles. This will handle identical edits without prompting the user.
704 @eh.wrapfunction(filemerge, b'filemerge')
698 @eh.wrapfunction(filemerge, b'filemerge')
705 def overridefilemerge(
699 def overridefilemerge(
706 origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
700 origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
707 ):
701 ):
708 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
702 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
709 return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)
703 return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)
710
704
711 ahash = lfutil.readasstandin(fca).lower()
705 ahash = lfutil.readasstandin(fca).lower()
712 dhash = lfutil.readasstandin(fcd).lower()
706 dhash = lfutil.readasstandin(fcd).lower()
713 ohash = lfutil.readasstandin(fco).lower()
707 ohash = lfutil.readasstandin(fco).lower()
714 if (
708 if (
715 ohash != ahash
709 ohash != ahash
716 and ohash != dhash
710 and ohash != dhash
717 and (
711 and (
718 dhash == ahash
712 dhash == ahash
719 or repo.ui.promptchoice(
713 or repo.ui.promptchoice(
720 _(
714 _(
721 b'largefile %s has a merge conflict\nancestor was %s\n'
715 b'largefile %s has a merge conflict\nancestor was %s\n'
722 b'you can keep (l)ocal %s or take (o)ther %s.\n'
716 b'you can keep (l)ocal %s or take (o)ther %s.\n'
723 b'what do you want to do?'
717 b'what do you want to do?'
724 b'$$ &Local $$ &Other'
718 b'$$ &Local $$ &Other'
725 )
719 )
726 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
720 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
727 0,
721 0,
728 )
722 )
729 == 1
723 == 1
730 )
724 )
731 ):
725 ):
732 repo.wwrite(fcd.path(), fco.data(), fco.flags())
726 repo.wwrite(fcd.path(), fco.data(), fco.flags())
733 return 0, False
727 return 0, False
734
728
735
729
736 @eh.wrapfunction(copiesmod, b'pathcopies')
730 @eh.wrapfunction(copiesmod, b'pathcopies')
737 def copiespathcopies(orig, ctx1, ctx2, match=None):
731 def copiespathcopies(orig, ctx1, ctx2, match=None):
738 copies = orig(ctx1, ctx2, match=match)
732 copies = orig(ctx1, ctx2, match=match)
739 updated = {}
733 updated = {}
740
734
741 for k, v in copies.items():
735 for k, v in copies.items():
742 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
736 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
743
737
744 return updated
738 return updated
745
739
746
740
747 # Copy first changes the matchers to match standins instead of
741 # Copy first changes the matchers to match standins instead of
748 # largefiles. Then it overrides util.copyfile in that function it
742 # largefiles. Then it overrides util.copyfile in that function it
749 # checks if the destination largefile already exists. It also keeps a
743 # checks if the destination largefile already exists. It also keeps a
750 # list of copied files so that the largefiles can be copied and the
744 # list of copied files so that the largefiles can be copied and the
751 # dirstate updated.
745 # dirstate updated.
752 @eh.wrapfunction(cmdutil, b'copy')
746 @eh.wrapfunction(cmdutil, b'copy')
753 def overridecopy(orig, ui, repo, pats, opts, rename=False):
747 def overridecopy(orig, ui, repo, pats, opts, rename=False):
754 # doesn't remove largefile on rename
748 # doesn't remove largefile on rename
755 if len(pats) < 2:
749 if len(pats) < 2:
756 # this isn't legal, let the original function deal with it
750 # this isn't legal, let the original function deal with it
757 return orig(ui, repo, pats, opts, rename)
751 return orig(ui, repo, pats, opts, rename)
758
752
759 # This could copy both lfiles and normal files in one command,
753 # This could copy both lfiles and normal files in one command,
760 # but we don't want to do that. First replace their matcher to
754 # but we don't want to do that. First replace their matcher to
761 # only match normal files and run it, then replace it to just
755 # only match normal files and run it, then replace it to just
762 # match largefiles and run it again.
756 # match largefiles and run it again.
763 nonormalfiles = False
757 nonormalfiles = False
764 nolfiles = False
758 nolfiles = False
765 manifest = repo[None].manifest()
759 manifest = repo[None].manifest()
766
760
767 def normalfilesmatchfn(
761 def normalfilesmatchfn(
768 orig,
762 orig,
769 ctx,
763 ctx,
770 pats=(),
764 pats=(),
771 opts=None,
765 opts=None,
772 globbed=False,
766 globbed=False,
773 default=b'relpath',
767 default=b'relpath',
774 badfn=None,
768 badfn=None,
775 ):
769 ):
776 if opts is None:
770 if opts is None:
777 opts = {}
771 opts = {}
778 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
772 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
779 return composenormalfilematcher(match, manifest)
773 return composenormalfilematcher(match, manifest)
780
774
781 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
775 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
782 try:
776 try:
783 result = orig(ui, repo, pats, opts, rename)
777 result = orig(ui, repo, pats, opts, rename)
784 except error.Abort as e:
778 except error.Abort as e:
785 if e.message != _(b'no files to copy'):
779 if e.message != _(b'no files to copy'):
786 raise e
780 raise e
787 else:
781 else:
788 nonormalfiles = True
782 nonormalfiles = True
789 result = 0
783 result = 0
790
784
791 # The first rename can cause our current working directory to be removed.
785 # The first rename can cause our current working directory to be removed.
792 # In that case there is nothing left to copy/rename so just quit.
786 # In that case there is nothing left to copy/rename so just quit.
793 try:
787 try:
794 repo.getcwd()
788 repo.getcwd()
795 except OSError:
789 except OSError:
796 return result
790 return result
797
791
798 def makestandin(relpath):
792 def makestandin(relpath):
799 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
793 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
800 return repo.wvfs.join(lfutil.standin(path))
794 return repo.wvfs.join(lfutil.standin(path))
801
795
802 fullpats = scmutil.expandpats(pats)
796 fullpats = scmutil.expandpats(pats)
803 dest = fullpats[-1]
797 dest = fullpats[-1]
804
798
805 if os.path.isdir(dest):
799 if os.path.isdir(dest):
806 if not os.path.isdir(makestandin(dest)):
800 if not os.path.isdir(makestandin(dest)):
807 os.makedirs(makestandin(dest))
801 os.makedirs(makestandin(dest))
808
802
809 try:
803 try:
810 # When we call orig below it creates the standins but we don't add
804 # When we call orig below it creates the standins but we don't add
811 # them to the dir state until later so lock during that time.
805 # them to the dir state until later so lock during that time.
812 wlock = repo.wlock()
806 wlock = repo.wlock()
813
807
814 manifest = repo[None].manifest()
808 manifest = repo[None].manifest()
815
809
816 def overridematch(
810 def overridematch(
817 orig,
811 orig,
818 ctx,
812 ctx,
819 pats=(),
813 pats=(),
820 opts=None,
814 opts=None,
821 globbed=False,
815 globbed=False,
822 default=b'relpath',
816 default=b'relpath',
823 badfn=None,
817 badfn=None,
824 ):
818 ):
825 if opts is None:
819 if opts is None:
826 opts = {}
820 opts = {}
827 newpats = []
821 newpats = []
828 # The patterns were previously mangled to add the standin
822 # The patterns were previously mangled to add the standin
829 # directory; we need to remove that now
823 # directory; we need to remove that now
830 for pat in pats:
824 for pat in pats:
831 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
825 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
832 newpats.append(pat.replace(lfutil.shortname, b''))
826 newpats.append(pat.replace(lfutil.shortname, b''))
833 else:
827 else:
834 newpats.append(pat)
828 newpats.append(pat)
835 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
829 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
836 m = copy.copy(match)
830 m = copy.copy(match)
837 lfile = lambda f: lfutil.standin(f) in manifest
831 lfile = lambda f: lfutil.standin(f) in manifest
838 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
832 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
839 m._fileset = set(m._files)
833 m._fileset = set(m._files)
840 origmatchfn = m.matchfn
834 origmatchfn = m.matchfn
841
835
842 def matchfn(f):
836 def matchfn(f):
843 lfile = lfutil.splitstandin(f)
837 lfile = lfutil.splitstandin(f)
844 return (
838 return (
845 lfile is not None
839 lfile is not None
846 and (f in manifest)
840 and (f in manifest)
847 and origmatchfn(lfile)
841 and origmatchfn(lfile)
848 or None
842 or None
849 )
843 )
850
844
851 m.matchfn = matchfn
845 m.matchfn = matchfn
852 return m
846 return m
853
847
854 listpats = []
848 listpats = []
855 for pat in pats:
849 for pat in pats:
856 if matchmod.patkind(pat) is not None:
850 if matchmod.patkind(pat) is not None:
857 listpats.append(pat)
851 listpats.append(pat)
858 else:
852 else:
859 listpats.append(makestandin(pat))
853 listpats.append(makestandin(pat))
860
854
861 copiedfiles = []
855 copiedfiles = []
862
856
863 def overridecopyfile(orig, src, dest, *args, **kwargs):
857 def overridecopyfile(orig, src, dest, *args, **kwargs):
864 if lfutil.shortname in src and dest.startswith(
858 if lfutil.shortname in src and dest.startswith(
865 repo.wjoin(lfutil.shortname)
859 repo.wjoin(lfutil.shortname)
866 ):
860 ):
867 destlfile = dest.replace(lfutil.shortname, b'')
861 destlfile = dest.replace(lfutil.shortname, b'')
868 if not opts[b'force'] and os.path.exists(destlfile):
862 if not opts[b'force'] and os.path.exists(destlfile):
869 raise IOError(
863 raise IOError(
870 b'', _(b'destination largefile already exists')
864 b'', _(b'destination largefile already exists')
871 )
865 )
872 copiedfiles.append((src, dest))
866 copiedfiles.append((src, dest))
873 orig(src, dest, *args, **kwargs)
867 orig(src, dest, *args, **kwargs)
874
868
875 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
869 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
876 with extensions.wrappedfunction(scmutil, b'match', overridematch):
870 with extensions.wrappedfunction(scmutil, b'match', overridematch):
877 result += orig(ui, repo, listpats, opts, rename)
871 result += orig(ui, repo, listpats, opts, rename)
878
872
879 lfdirstate = lfutil.openlfdirstate(ui, repo)
873 lfdirstate = lfutil.openlfdirstate(ui, repo)
880 for (src, dest) in copiedfiles:
874 for (src, dest) in copiedfiles:
881 if lfutil.shortname in src and dest.startswith(
875 if lfutil.shortname in src and dest.startswith(
882 repo.wjoin(lfutil.shortname)
876 repo.wjoin(lfutil.shortname)
883 ):
877 ):
884 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
878 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
885 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
879 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
886 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
880 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
887 if not os.path.isdir(destlfiledir):
881 if not os.path.isdir(destlfiledir):
888 os.makedirs(destlfiledir)
882 os.makedirs(destlfiledir)
889 if rename:
883 if rename:
890 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
884 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
891
885
892 # The file is gone, but this deletes any empty parent
886 # The file is gone, but this deletes any empty parent
893 # directories as a side-effect.
887 # directories as a side-effect.
894 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
888 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
895 lfdirstate.set_untracked(srclfile)
889 lfdirstate.set_untracked(srclfile)
896 else:
890 else:
897 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
891 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
898
892
899 lfdirstate.set_tracked(destlfile)
893 lfdirstate.set_tracked(destlfile)
900 lfdirstate.write(repo.currenttransaction())
894 lfdirstate.write(repo.currenttransaction())
901 except error.Abort as e:
895 except error.Abort as e:
902 if e.message != _(b'no files to copy'):
896 if e.message != _(b'no files to copy'):
903 raise e
897 raise e
904 else:
898 else:
905 nolfiles = True
899 nolfiles = True
906 finally:
900 finally:
907 wlock.release()
901 wlock.release()
908
902
909 if nolfiles and nonormalfiles:
903 if nolfiles and nonormalfiles:
910 raise error.Abort(_(b'no files to copy'))
904 raise error.Abort(_(b'no files to copy'))
911
905
912 return result
906 return result
913
907
914
908
915 # When the user calls revert, we have to be careful to not revert any
909 # When the user calls revert, we have to be careful to not revert any
916 # changes to other largefiles accidentally. This means we have to keep
910 # changes to other largefiles accidentally. This means we have to keep
917 # track of the largefiles that are being reverted so we only pull down
911 # track of the largefiles that are being reverted so we only pull down
918 # the necessary largefiles.
912 # the necessary largefiles.
919 #
913 #
920 # Standins are only updated (to match the hash of largefiles) before
914 # Standins are only updated (to match the hash of largefiles) before
921 # commits. Update the standins then run the original revert, changing
915 # commits. Update the standins then run the original revert, changing
922 # the matcher to hit standins instead of largefiles. Based on the
916 # the matcher to hit standins instead of largefiles. Based on the
923 # resulting standins update the largefiles.
917 # resulting standins update the largefiles.
924 @eh.wrapfunction(cmdutil, b'revert')
918 @eh.wrapfunction(cmdutil, b'revert')
925 def overriderevert(orig, ui, repo, ctx, *pats, **opts):
919 def overriderevert(orig, ui, repo, ctx, *pats, **opts):
926 # Because we put the standins in a bad state (by updating them)
920 # Because we put the standins in a bad state (by updating them)
927 # and then return them to a correct state we need to lock to
921 # and then return them to a correct state we need to lock to
928 # prevent others from changing them in their incorrect state.
922 # prevent others from changing them in their incorrect state.
929 with repo.wlock():
923 with repo.wlock():
930 lfdirstate = lfutil.openlfdirstate(ui, repo)
924 lfdirstate = lfutil.openlfdirstate(ui, repo)
931 s = lfutil.lfdirstatestatus(lfdirstate, repo)
925 s = lfutil.lfdirstatestatus(lfdirstate, repo)
932 lfdirstate.write(repo.currenttransaction())
926 lfdirstate.write(repo.currenttransaction())
933 for lfile in s.modified:
927 for lfile in s.modified:
934 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
928 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
935 for lfile in s.deleted:
929 for lfile in s.deleted:
936 fstandin = lfutil.standin(lfile)
930 fstandin = lfutil.standin(lfile)
937 if repo.wvfs.exists(fstandin):
931 if repo.wvfs.exists(fstandin):
938 repo.wvfs.unlink(fstandin)
932 repo.wvfs.unlink(fstandin)
939
933
940 oldstandins = lfutil.getstandinsstate(repo)
934 oldstandins = lfutil.getstandinsstate(repo)
941
935
942 def overridematch(
936 def overridematch(
943 orig,
937 orig,
944 mctx,
938 mctx,
945 pats=(),
939 pats=(),
946 opts=None,
940 opts=None,
947 globbed=False,
941 globbed=False,
948 default=b'relpath',
942 default=b'relpath',
949 badfn=None,
943 badfn=None,
950 ):
944 ):
951 if opts is None:
945 if opts is None:
952 opts = {}
946 opts = {}
953 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
947 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
954 m = copy.copy(match)
948 m = copy.copy(match)
955
949
956 # revert supports recursing into subrepos, and though largefiles
950 # revert supports recursing into subrepos, and though largefiles
957 # currently doesn't work correctly in that case, this match is
951 # currently doesn't work correctly in that case, this match is
958 # called, so the lfdirstate above may not be the correct one for
952 # called, so the lfdirstate above may not be the correct one for
959 # this invocation of match.
953 # this invocation of match.
960 lfdirstate = lfutil.openlfdirstate(
954 lfdirstate = lfutil.openlfdirstate(
961 mctx.repo().ui, mctx.repo(), False
955 mctx.repo().ui, mctx.repo(), False
962 )
956 )
963
957
964 wctx = repo[None]
958 wctx = repo[None]
965 matchfiles = []
959 matchfiles = []
966 for f in m._files:
960 for f in m._files:
967 standin = lfutil.standin(f)
961 standin = lfutil.standin(f)
968 if standin in ctx or standin in mctx:
962 if standin in ctx or standin in mctx:
969 matchfiles.append(standin)
963 matchfiles.append(standin)
970 elif standin in wctx or lfdirstate.get_entry(f).removed:
964 elif standin in wctx or lfdirstate.get_entry(f).removed:
971 continue
965 continue
972 else:
966 else:
973 matchfiles.append(f)
967 matchfiles.append(f)
974 m._files = matchfiles
968 m._files = matchfiles
975 m._fileset = set(m._files)
969 m._fileset = set(m._files)
976 origmatchfn = m.matchfn
970 origmatchfn = m.matchfn
977
971
978 def matchfn(f):
972 def matchfn(f):
979 lfile = lfutil.splitstandin(f)
973 lfile = lfutil.splitstandin(f)
980 if lfile is not None:
974 if lfile is not None:
981 return origmatchfn(lfile) and (f in ctx or f in mctx)
975 return origmatchfn(lfile) and (f in ctx or f in mctx)
982 return origmatchfn(f)
976 return origmatchfn(f)
983
977
984 m.matchfn = matchfn
978 m.matchfn = matchfn
985 return m
979 return m
986
980
987 with extensions.wrappedfunction(scmutil, b'match', overridematch):
981 with extensions.wrappedfunction(scmutil, b'match', overridematch):
988 orig(ui, repo, ctx, *pats, **opts)
982 orig(ui, repo, ctx, *pats, **opts)
989
983
990 newstandins = lfutil.getstandinsstate(repo)
984 newstandins = lfutil.getstandinsstate(repo)
991 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
985 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
992 # lfdirstate should be 'normallookup'-ed for updated files,
986 # lfdirstate should be 'normallookup'-ed for updated files,
993 # because reverting doesn't touch dirstate for 'normal' files
987 # because reverting doesn't touch dirstate for 'normal' files
994 # when the target revision is explicitly specified: in such a case,
988 # when the target revision is explicitly specified: in such a case,
995 # an 'n' state and a valid timestamp in the dirstate don't ensure that
989 # an 'n' state and a valid timestamp in the dirstate don't ensure that
996 # the target (standin) file is 'clean'.
990 # the target (standin) file is 'clean'.
997 lfcommands.updatelfiles(
991 lfcommands.updatelfiles(
998 ui, repo, filelist, printmessage=False, normallookup=True
992 ui, repo, filelist, printmessage=False, normallookup=True
999 )
993 )
1000
994
1001
995
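overriderevert above works by briefly swapping scmutil.match for a largefile-aware version while the original revert runs, using extensions.wrappedfunction as a context manager. As a rough standalone sketch of that temporary-wrapping pattern (the helper name and container argument are illustrative, not the real extensions API):

    import contextlib

    @contextlib.contextmanager
    def temporarily_wrapped(container, name, wrapper):
        # Toy stand-in for extensions.wrappedfunction: replace
        # container.<name> with a wrapper that receives the original
        # function first, and restore it on exit even if an error occurs.
        orig = getattr(container, name)

        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)

        setattr(container, name, wrapped)
        try:
            yield
        finally:
            setattr(container, name, orig)
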
1002 # after pulling changesets, we need to take some extra care to get
996 # after pulling changesets, we need to take some extra care to get
1003 # largefiles updated remotely
997 # largefiles updated remotely
1004 @eh.wrapcommand(
998 @eh.wrapcommand(
1005 b'pull',
999 b'pull',
1006 opts=[
1000 opts=[
1007 (
1001 (
1008 b'',
1002 b'',
1009 b'all-largefiles',
1003 b'all-largefiles',
1010 None,
1004 None,
1011 _(b'download all pulled versions of largefiles (DEPRECATED)'),
1005 _(b'download all pulled versions of largefiles (DEPRECATED)'),
1012 ),
1006 ),
1013 (
1007 (
1014 b'',
1008 b'',
1015 b'lfrev',
1009 b'lfrev',
1016 [],
1010 [],
1017 _(b'download largefiles for these revisions'),
1011 _(b'download largefiles for these revisions'),
1018 _(b'REV'),
1012 _(b'REV'),
1019 ),
1013 ),
1020 ],
1014 ],
1021 )
1015 )
1022 def overridepull(orig, ui, repo, source=None, **opts):
1016 def overridepull(orig, ui, repo, source=None, **opts):
1023 revsprepull = len(repo)
1017 revsprepull = len(repo)
1024 if not source:
1018 if not source:
1025 source = b'default'
1019 source = b'default'
1026 repo.lfpullsource = source
1020 repo.lfpullsource = source
1027 result = orig(ui, repo, source, **opts)
1021 result = orig(ui, repo, source, **opts)
1028 revspostpull = len(repo)
1022 revspostpull = len(repo)
1029 lfrevs = opts.get('lfrev', [])
1023 lfrevs = opts.get('lfrev', [])
1030 if opts.get('all_largefiles'):
1024 if opts.get('all_largefiles'):
1031 lfrevs.append(b'pulled()')
1025 lfrevs.append(b'pulled()')
1032 if lfrevs and revspostpull > revsprepull:
1026 if lfrevs and revspostpull > revsprepull:
1033 numcached = 0
1027 numcached = 0
1034 repo.firstpulled = revsprepull # for pulled() revset expression
1028 repo.firstpulled = revsprepull # for pulled() revset expression
1035 try:
1029 try:
1036 for rev in logcmdutil.revrange(repo, lfrevs):
1030 for rev in logcmdutil.revrange(repo, lfrevs):
1037 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
1031 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
1038 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
1032 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
1039 numcached += len(cached)
1033 numcached += len(cached)
1040 finally:
1034 finally:
1041 del repo.firstpulled
1035 del repo.firstpulled
1042 ui.status(_(b"%d largefiles cached\n") % numcached)
1036 ui.status(_(b"%d largefiles cached\n") % numcached)
1043 return result
1037 return result
1044
1038
1045
1039
1046 @eh.wrapcommand(
1040 @eh.wrapcommand(
1047 b'push',
1041 b'push',
1048 opts=[
1042 opts=[
1049 (
1043 (
1050 b'',
1044 b'',
1051 b'lfrev',
1045 b'lfrev',
1052 [],
1046 [],
1053 _(b'upload largefiles for these revisions'),
1047 _(b'upload largefiles for these revisions'),
1054 _(b'REV'),
1048 _(b'REV'),
1055 )
1049 )
1056 ],
1050 ],
1057 )
1051 )
1058 def overridepush(orig, ui, repo, *args, **kwargs):
1052 def overridepush(orig, ui, repo, *args, **kwargs):
1059 """Override push command and store --lfrev parameters in opargs"""
1053 """Override push command and store --lfrev parameters in opargs"""
1060 lfrevs = kwargs.pop('lfrev', None)
1054 lfrevs = kwargs.pop('lfrev', None)
1061 if lfrevs:
1055 if lfrevs:
1062 opargs = kwargs.setdefault('opargs', {})
1056 opargs = kwargs.setdefault('opargs', {})
1063 opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
1057 opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
1064 return orig(ui, repo, *args, **kwargs)
1058 return orig(ui, repo, *args, **kwargs)
1065
1059
1066
1060
1067 @eh.wrapfunction(exchange, b'pushoperation')
1061 @eh.wrapfunction(exchange, b'pushoperation')
1068 def exchangepushoperation(orig, *args, **kwargs):
1062 def exchangepushoperation(orig, *args, **kwargs):
1069 """Override pushoperation constructor and store lfrevs parameter"""
1063 """Override pushoperation constructor and store lfrevs parameter"""
1070 lfrevs = kwargs.pop('lfrevs', None)
1064 lfrevs = kwargs.pop('lfrevs', None)
1071 pushop = orig(*args, **kwargs)
1065 pushop = orig(*args, **kwargs)
1072 pushop.lfrevs = lfrevs
1066 pushop.lfrevs = lfrevs
1073 return pushop
1067 return pushop
1074
1068
1075
1069
1076 @eh.revsetpredicate(b'pulled()')
1070 @eh.revsetpredicate(b'pulled()')
1077 def pulledrevsetsymbol(repo, subset, x):
1071 def pulledrevsetsymbol(repo, subset, x):
1078 """Changesets that just has been pulled.
1072 """Changesets that just has been pulled.
1079
1073
1080 Only available with largefiles from pull --lfrev expressions.
1074 Only available with largefiles from pull --lfrev expressions.
1081
1075
1082 .. container:: verbose
1076 .. container:: verbose
1083
1077
1084 Some examples:
1078 Some examples:
1085
1079
1086 - pull largefiles for all new changesets::
1080 - pull largefiles for all new changesets::
1087
1081
1088 hg pull --lfrev "pulled()"
1082 hg pull --lfrev "pulled()"
1089
1083
1090 - pull largefiles for all new branch heads::
1084 - pull largefiles for all new branch heads::
1091
1085
1092 hg pull --lfrev "head(pulled()) and not closed()"
1086 hg pull --lfrev "head(pulled()) and not closed()"
1093
1087
1094 """
1088 """
1095
1089
1096 try:
1090 try:
1097 firstpulled = repo.firstpulled
1091 firstpulled = repo.firstpulled
1098 except AttributeError:
1092 except AttributeError:
1099 raise error.Abort(_(b"pulled() only available in --lfrev"))
1093 raise error.Abort(_(b"pulled() only available in --lfrev"))
1100 return smartset.baseset([r for r in subset if r >= firstpulled])
1094 return smartset.baseset([r for r in subset if r >= firstpulled])
1101
1095
1102
1096
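In other words, pulled() keeps every revision numbered at or above the repository length recorded just before the pull (repo.firstpulled, set by overridepull). A minimal standalone sketch of that filtering, with made-up revision numbers:

    def pulled_revs(all_revs, firstpulled):
        # Revisions numbered below `firstpulled` existed before the pull;
        # everything at or above it arrived with the pull we just ran.
        return [r for r in all_revs if r >= firstpulled]

    # e.g. 5 revisions existed before the pull and 8 exist afterwards:
    # pulled_revs(range(8), firstpulled=5) -> [5, 6, 7]
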
1103 @eh.wrapcommand(
1097 @eh.wrapcommand(
1104 b'clone',
1098 b'clone',
1105 opts=[
1099 opts=[
1106 (
1100 (
1107 b'',
1101 b'',
1108 b'all-largefiles',
1102 b'all-largefiles',
1109 None,
1103 None,
1110 _(b'download all versions of all largefiles'),
1104 _(b'download all versions of all largefiles'),
1111 )
1105 )
1112 ],
1106 ],
1113 )
1107 )
1114 def overrideclone(orig, ui, source, dest=None, **opts):
1108 def overrideclone(orig, ui, source, dest=None, **opts):
1115 d = dest
1109 d = dest
1116 if d is None:
1110 if d is None:
1117 d = hg.defaultdest(source)
1111 d = hg.defaultdest(source)
1118 if opts.get('all_largefiles') and not hg.islocal(d):
1112 if opts.get('all_largefiles') and not hg.islocal(d):
1119 raise error.Abort(
1113 raise error.Abort(
1120 _(b'--all-largefiles is incompatible with non-local destination %s')
1114 _(b'--all-largefiles is incompatible with non-local destination %s')
1121 % d
1115 % d
1122 )
1116 )
1123
1117
1124 return orig(ui, source, dest, **opts)
1118 return orig(ui, source, dest, **opts)
1125
1119
1126
1120
1127 @eh.wrapfunction(hg, b'clone')
1121 @eh.wrapfunction(hg, b'clone')
1128 def hgclone(orig, ui, opts, *args, **kwargs):
1122 def hgclone(orig, ui, opts, *args, **kwargs):
1129 result = orig(ui, opts, *args, **kwargs)
1123 result = orig(ui, opts, *args, **kwargs)
1130
1124
1131 if result is not None:
1125 if result is not None:
1132 sourcerepo, destrepo = result
1126 sourcerepo, destrepo = result
1133 repo = destrepo.local()
1127 repo = destrepo.local()
1134
1128
1135 # When cloning to a remote repo (like through SSH), no repo is available
1129 # When cloning to a remote repo (like through SSH), no repo is available
1136 # from the peer. Therefore the largefiles can't be downloaded and the
1130 # from the peer. Therefore the largefiles can't be downloaded and the
1137 # hgrc can't be updated.
1131 # hgrc can't be updated.
1138 if not repo:
1132 if not repo:
1139 return result
1133 return result
1140
1134
1141 # Caching is implicitly limited to the 'rev' option, since the dest repo
1135 # Caching is implicitly limited to the 'rev' option, since the dest repo
1142 # was truncated at that point. The user may expect a download count with
1136 # was truncated at that point. The user may expect a download count with
1143 # this option, so attempt it whether or not this is a largefile repo.
1137 # this option, so attempt it whether or not this is a largefile repo.
1144 if opts.get(b'all_largefiles'):
1138 if opts.get(b'all_largefiles'):
1145 success, missing = lfcommands.downloadlfiles(ui, repo)
1139 success, missing = lfcommands.downloadlfiles(ui, repo)
1146
1140
1147 if missing != 0:
1141 if missing != 0:
1148 return None
1142 return None
1149
1143
1150 return result
1144 return result
1151
1145
1152
1146
1153 @eh.wrapcommand(b'rebase', extension=b'rebase')
1147 @eh.wrapcommand(b'rebase', extension=b'rebase')
1154 def overriderebasecmd(orig, ui, repo, **opts):
1148 def overriderebasecmd(orig, ui, repo, **opts):
1155 if not util.safehasattr(repo, b'_largefilesenabled'):
1149 if not util.safehasattr(repo, b'_largefilesenabled'):
1156 return orig(ui, repo, **opts)
1150 return orig(ui, repo, **opts)
1157
1151
1158 resuming = opts.get('continue')
1152 resuming = opts.get('continue')
1159 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1153 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1160 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1154 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1161 try:
1155 try:
1162 with ui.configoverride(
1156 with ui.configoverride(
1163 {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
1157 {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
1164 ):
1158 ):
1165 return orig(ui, repo, **opts)
1159 return orig(ui, repo, **opts)
1166 finally:
1160 finally:
1167 repo._lfstatuswriters.pop()
1161 repo._lfstatuswriters.pop()
1168 repo._lfcommithooks.pop()
1162 repo._lfcommithooks.pop()
1169
1163
1170
1164
1171 @eh.extsetup
1165 @eh.extsetup
1172 def overriderebase(ui):
1166 def overriderebase(ui):
1173 try:
1167 try:
1174 rebase = extensions.find(b'rebase')
1168 rebase = extensions.find(b'rebase')
1175 except KeyError:
1169 except KeyError:
1176 pass
1170 pass
1177 else:
1171 else:
1178
1172
1179 def _dorebase(orig, *args, **kwargs):
1173 def _dorebase(orig, *args, **kwargs):
1180 kwargs['inmemory'] = False
1174 kwargs['inmemory'] = False
1181 return orig(*args, **kwargs)
1175 return orig(*args, **kwargs)
1182
1176
1183 extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
1177 extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
1184
1178
1185
1179
1186 @eh.wrapcommand(b'archive')
1180 @eh.wrapcommand(b'archive')
1187 def overridearchivecmd(orig, ui, repo, dest, **opts):
1181 def overridearchivecmd(orig, ui, repo, dest, **opts):
1188 with lfstatus(repo.unfiltered()):
1182 with lfstatus(repo.unfiltered()):
1189 return orig(ui, repo.unfiltered(), dest, **opts)
1183 return orig(ui, repo.unfiltered(), dest, **opts)
1190
1184
1191
1185
1192 @eh.wrapfunction(webcommands, b'archive')
1186 @eh.wrapfunction(webcommands, b'archive')
1193 def hgwebarchive(orig, web):
1187 def hgwebarchive(orig, web):
1194 with lfstatus(web.repo):
1188 with lfstatus(web.repo):
1195 return orig(web)
1189 return orig(web)
1196
1190
1197
1191
1198 @eh.wrapfunction(archival, b'archive')
1192 @eh.wrapfunction(archival, b'archive')
1199 def overridearchive(
1193 def overridearchive(
1200 orig,
1194 orig,
1201 repo,
1195 repo,
1202 dest,
1196 dest,
1203 node,
1197 node,
1204 kind,
1198 kind,
1205 decode=True,
1199 decode=True,
1206 match=None,
1200 match=None,
1207 prefix=b'',
1201 prefix=b'',
1208 mtime=None,
1202 mtime=None,
1209 subrepos=None,
1203 subrepos=None,
1210 ):
1204 ):
1211 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1205 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1212 # unfiltered repo's attr, so check that as well.
1206 # unfiltered repo's attr, so check that as well.
1213 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1207 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1214 return orig(
1208 return orig(
1215 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1209 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1216 )
1210 )
1217
1211
1218 # No need to lock because we are only reading history and
1212 # No need to lock because we are only reading history and
1219 # largefile caches, neither of which are modified.
1213 # largefile caches, neither of which are modified.
1220 if node is not None:
1214 if node is not None:
1221 lfcommands.cachelfiles(repo.ui, repo, node)
1215 lfcommands.cachelfiles(repo.ui, repo, node)
1222
1216
1223 if kind not in archival.archivers:
1217 if kind not in archival.archivers:
1224 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1218 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1225
1219
1226 ctx = repo[node]
1220 ctx = repo[node]
1227
1221
1228 if kind == b'files':
1222 if kind == b'files':
1229 if prefix:
1223 if prefix:
1230 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1224 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1231 else:
1225 else:
1232 prefix = archival.tidyprefix(dest, kind, prefix)
1226 prefix = archival.tidyprefix(dest, kind, prefix)
1233
1227
1234 def write(name, mode, islink, getdata):
1228 def write(name, mode, islink, getdata):
1235 if match and not match(name):
1229 if match and not match(name):
1236 return
1230 return
1237 data = getdata()
1231 data = getdata()
1238 if decode:
1232 if decode:
1239 data = repo.wwritedata(name, data)
1233 data = repo.wwritedata(name, data)
1240 archiver.addfile(prefix + name, mode, islink, data)
1234 archiver.addfile(prefix + name, mode, islink, data)
1241
1235
1242 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1236 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1243
1237
1244 if repo.ui.configbool(b"ui", b"archivemeta"):
1238 if repo.ui.configbool(b"ui", b"archivemeta"):
1245 write(
1239 write(
1246 b'.hg_archival.txt',
1240 b'.hg_archival.txt',
1247 0o644,
1241 0o644,
1248 False,
1242 False,
1249 lambda: archival.buildmetadata(ctx),
1243 lambda: archival.buildmetadata(ctx),
1250 )
1244 )
1251
1245
1252 for f in ctx:
1246 for f in ctx:
1253 ff = ctx.flags(f)
1247 ff = ctx.flags(f)
1254 getdata = ctx[f].data
1248 getdata = ctx[f].data
1255 lfile = lfutil.splitstandin(f)
1249 lfile = lfutil.splitstandin(f)
1256 if lfile is not None:
1250 if lfile is not None:
1257 if node is not None:
1251 if node is not None:
1258 path = lfutil.findfile(repo, getdata().strip())
1252 path = lfutil.findfile(repo, getdata().strip())
1259
1253
1260 if path is None:
1254 if path is None:
1261 raise error.Abort(
1255 raise error.Abort(
1262 _(
1256 _(
1263 b'largefile %s not found in repo store or system cache'
1257 b'largefile %s not found in repo store or system cache'
1264 )
1258 )
1265 % lfile
1259 % lfile
1266 )
1260 )
1267 else:
1261 else:
1268 path = lfile
1262 path = lfile
1269
1263
1270 f = lfile
1264 f = lfile
1271
1265
1272 getdata = lambda: util.readfile(path)
1266 getdata = lambda: util.readfile(path)
1273 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1267 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1274
1268
1275 if subrepos:
1269 if subrepos:
1276 for subpath in sorted(ctx.substate):
1270 for subpath in sorted(ctx.substate):
1277 sub = ctx.workingsub(subpath)
1271 sub = ctx.workingsub(subpath)
1278 submatch = matchmod.subdirmatcher(subpath, match)
1272 submatch = matchmod.subdirmatcher(subpath, match)
1279 subprefix = prefix + subpath + b'/'
1273 subprefix = prefix + subpath + b'/'
1280
1274
1281 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1275 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1282 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1276 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1283 # allow only hgsubrepos to set this, instead of the current scheme
1277 # allow only hgsubrepos to set this, instead of the current scheme
1284 # where the parent sets this for the child.
1278 # where the parent sets this for the child.
1285 with (
1279 with (
1286 util.safehasattr(sub, '_repo')
1280 util.safehasattr(sub, '_repo')
1287 and lfstatus(sub._repo)
1281 and lfstatus(sub._repo)
1288 or util.nullcontextmanager()
1282 or util.nullcontextmanager()
1289 ):
1283 ):
1290 sub.archive(archiver, subprefix, submatch)
1284 sub.archive(archiver, subprefix, submatch)
1291
1285
1292 archiver.done()
1286 archiver.done()
1293
1287
1294
1288
1295 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1289 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1296 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1290 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1297 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1291 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1298 if not lfenabled or not repo._repo.lfstatus:
1292 if not lfenabled or not repo._repo.lfstatus:
1299 return orig(repo, archiver, prefix, match, decode)
1293 return orig(repo, archiver, prefix, match, decode)
1300
1294
1301 repo._get(repo._state + (b'hg',))
1295 repo._get(repo._state + (b'hg',))
1302 rev = repo._state[1]
1296 rev = repo._state[1]
1303 ctx = repo._repo[rev]
1297 ctx = repo._repo[rev]
1304
1298
1305 if ctx.node() is not None:
1299 if ctx.node() is not None:
1306 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1300 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1307
1301
1308 def write(name, mode, islink, getdata):
1302 def write(name, mode, islink, getdata):
1309 # At this point, the standin has been replaced with the largefile name,
1303 # At this point, the standin has been replaced with the largefile name,
1310 # so the normal matcher works here without the lfutil variants.
1304 # so the normal matcher works here without the lfutil variants.
1311 if match and not match(f):
1305 if match and not match(f):
1312 return
1306 return
1313 data = getdata()
1307 data = getdata()
1314 if decode:
1308 if decode:
1315 data = repo._repo.wwritedata(name, data)
1309 data = repo._repo.wwritedata(name, data)
1316
1310
1317 archiver.addfile(prefix + name, mode, islink, data)
1311 archiver.addfile(prefix + name, mode, islink, data)
1318
1312
1319 for f in ctx:
1313 for f in ctx:
1320 ff = ctx.flags(f)
1314 ff = ctx.flags(f)
1321 getdata = ctx[f].data
1315 getdata = ctx[f].data
1322 lfile = lfutil.splitstandin(f)
1316 lfile = lfutil.splitstandin(f)
1323 if lfile is not None:
1317 if lfile is not None:
1324 if ctx.node() is not None:
1318 if ctx.node() is not None:
1325 path = lfutil.findfile(repo._repo, getdata().strip())
1319 path = lfutil.findfile(repo._repo, getdata().strip())
1326
1320
1327 if path is None:
1321 if path is None:
1328 raise error.Abort(
1322 raise error.Abort(
1329 _(
1323 _(
1330 b'largefile %s not found in repo store or system cache'
1324 b'largefile %s not found in repo store or system cache'
1331 )
1325 )
1332 % lfile
1326 % lfile
1333 )
1327 )
1334 else:
1328 else:
1335 path = lfile
1329 path = lfile
1336
1330
1337 f = lfile
1331 f = lfile
1338
1332
1339 getdata = lambda: util.readfile(os.path.join(prefix, path))
1333 getdata = lambda: util.readfile(os.path.join(prefix, path))
1340
1334
1341 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1335 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1342
1336
1343 for subpath in sorted(ctx.substate):
1337 for subpath in sorted(ctx.substate):
1344 sub = ctx.workingsub(subpath)
1338 sub = ctx.workingsub(subpath)
1345 submatch = matchmod.subdirmatcher(subpath, match)
1339 submatch = matchmod.subdirmatcher(subpath, match)
1346 subprefix = prefix + subpath + b'/'
1340 subprefix = prefix + subpath + b'/'
1347 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1341 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1348 # infer and possibly set lfstatus at the top of this function. That
1342 # infer and possibly set lfstatus at the top of this function. That
1349 # would allow only hgsubrepos to set this, instead of the current scheme
1343 # would allow only hgsubrepos to set this, instead of the current scheme
1350 # where the parent sets this for the child.
1344 # where the parent sets this for the child.
1351 with (
1345 with (
1352 util.safehasattr(sub, '_repo')
1346 util.safehasattr(sub, '_repo')
1353 and lfstatus(sub._repo)
1347 and lfstatus(sub._repo)
1354 or util.nullcontextmanager()
1348 or util.nullcontextmanager()
1355 ):
1349 ):
1356 sub.archive(archiver, subprefix, submatch, decode)
1350 sub.archive(archiver, subprefix, submatch, decode)
1357
1351
1358
1352
1359 # If a largefile is modified, the change is not reflected in its
1353 # If a largefile is modified, the change is not reflected in its
1360 # standin until a commit. cmdutil.bailifchanged() raises an exception
1354 # standin until a commit. cmdutil.bailifchanged() raises an exception
1361 # if the repo has uncommitted changes. Wrap it to also check if
1355 # if the repo has uncommitted changes. Wrap it to also check if
1362 # largefiles were changed. This is used by bisect, backout and fetch.
1356 # largefiles were changed. This is used by bisect, backout and fetch.
1363 @eh.wrapfunction(cmdutil, b'bailifchanged')
1357 @eh.wrapfunction(cmdutil, b'bailifchanged')
1364 def overridebailifchanged(orig, repo, *args, **kwargs):
1358 def overridebailifchanged(orig, repo, *args, **kwargs):
1365 orig(repo, *args, **kwargs)
1359 orig(repo, *args, **kwargs)
1366 with lfstatus(repo):
1360 with lfstatus(repo):
1367 s = repo.status()
1361 s = repo.status()
1368 if s.modified or s.added or s.removed or s.deleted:
1362 if s.modified or s.added or s.removed or s.deleted:
1369 raise error.Abort(_(b'uncommitted changes'))
1363 raise error.Abort(_(b'uncommitted changes'))
1370
1364
1371
1365
1372 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1366 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1373 def postcommitstatus(orig, repo, *args, **kwargs):
1367 def postcommitstatus(orig, repo, *args, **kwargs):
1374 with lfstatus(repo):
1368 with lfstatus(repo):
1375 return orig(repo, *args, **kwargs)
1369 return orig(repo, *args, **kwargs)
1376
1370
1377
1371
1378 @eh.wrapfunction(cmdutil, b'forget')
1372 @eh.wrapfunction(cmdutil, b'forget')
1379 def cmdutilforget(
1373 def cmdutilforget(
1380 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1374 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1381 ):
1375 ):
1382 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1376 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1383 bad, forgot = orig(
1377 bad, forgot = orig(
1384 ui,
1378 ui,
1385 repo,
1379 repo,
1386 normalmatcher,
1380 normalmatcher,
1387 prefix,
1381 prefix,
1388 uipathfn,
1382 uipathfn,
1389 explicitonly,
1383 explicitonly,
1390 dryrun,
1384 dryrun,
1391 interactive,
1385 interactive,
1392 )
1386 )
1393 m = composelargefilematcher(match, repo[None].manifest())
1387 m = composelargefilematcher(match, repo[None].manifest())
1394
1388
1395 with lfstatus(repo):
1389 with lfstatus(repo):
1396 s = repo.status(match=m, clean=True)
1390 s = repo.status(match=m, clean=True)
1397 manifest = repo[None].manifest()
1391 manifest = repo[None].manifest()
1398 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1392 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1399 forget = [f for f in forget if lfutil.standin(f) in manifest]
1393 forget = [f for f in forget if lfutil.standin(f) in manifest]
1400
1394
1401 for f in forget:
1395 for f in forget:
1402 fstandin = lfutil.standin(f)
1396 fstandin = lfutil.standin(f)
1403 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1397 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1404 ui.warn(
1398 ui.warn(
1405 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1399 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1406 )
1400 )
1407 bad.append(f)
1401 bad.append(f)
1408
1402
1409 for f in forget:
1403 for f in forget:
1410 if ui.verbose or not m.exact(f):
1404 if ui.verbose or not m.exact(f):
1411 ui.status(_(b'removing %s\n') % uipathfn(f))
1405 ui.status(_(b'removing %s\n') % uipathfn(f))
1412
1406
1413 # Need to lock because standin files are deleted then removed from the
1407 # Need to lock because standin files are deleted then removed from the
1414 # repository and we could race in-between.
1408 # repository and we could race in-between.
1415 with repo.wlock():
1409 with repo.wlock():
1416 lfdirstate = lfutil.openlfdirstate(ui, repo)
1410 lfdirstate = lfutil.openlfdirstate(ui, repo)
1417 for f in forget:
1411 for f in forget:
1418 lfdirstate.set_untracked(f)
1412 lfdirstate.set_untracked(f)
1419 lfdirstate.write(repo.currenttransaction())
1413 lfdirstate.write(repo.currenttransaction())
1420 standins = [lfutil.standin(f) for f in forget]
1414 standins = [lfutil.standin(f) for f in forget]
1421 for f in standins:
1415 for f in standins:
1422 repo.wvfs.unlinkpath(f, ignoremissing=True)
1416 repo.wvfs.unlinkpath(f, ignoremissing=True)
1423 rejected = repo[None].forget(standins)
1417 rejected = repo[None].forget(standins)
1424
1418
1425 bad.extend(f for f in rejected if f in m.files())
1419 bad.extend(f for f in rejected if f in m.files())
1426 forgot.extend(f for f in forget if f not in rejected)
1420 forgot.extend(f for f in forget if f not in rejected)
1427 return bad, forgot
1421 return bad, forgot
1428
1422
1429
1423
1430 def _getoutgoings(repo, other, missing, addfunc):
1424 def _getoutgoings(repo, other, missing, addfunc):
1431 """get pairs of filename and largefile hash in outgoing revisions
1425 """get pairs of filename and largefile hash in outgoing revisions
1432 in 'missing'.
1426 in 'missing'.
1433
1427
1434 largefiles already existing on 'other' repository are ignored.
1428 largefiles already existing on 'other' repository are ignored.
1435
1429
1436 'addfunc' is invoked with each unique pair of filename and
1430 'addfunc' is invoked with each unique pair of filename and
1437 largefile hash value.
1431 largefile hash value.
1438 """
1432 """
1439 knowns = set()
1433 knowns = set()
1440 lfhashes = set()
1434 lfhashes = set()
1441
1435
1442 def dedup(fn, lfhash):
1436 def dedup(fn, lfhash):
1443 k = (fn, lfhash)
1437 k = (fn, lfhash)
1444 if k not in knowns:
1438 if k not in knowns:
1445 knowns.add(k)
1439 knowns.add(k)
1446 lfhashes.add(lfhash)
1440 lfhashes.add(lfhash)
1447
1441
1448 lfutil.getlfilestoupload(repo, missing, dedup)
1442 lfutil.getlfilestoupload(repo, missing, dedup)
1449 if lfhashes:
1443 if lfhashes:
1450 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1444 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1451 for fn, lfhash in knowns:
1445 for fn, lfhash in knowns:
1452 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1446 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1453 addfunc(fn, lfhash)
1447 addfunc(fn, lfhash)
1454
1448
1455
1449
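The helper above deduplicates (filename, hash) pairs first and then asks the remote store about all hashes in a single exists() call, instead of issuing one query per file. A simplified sketch of the same pattern, where store_exists stands in for openstore(repo, other).exists():

    def outgoing_largefiles(pairs, store_exists):
        # pairs: iterable of (filename, hash) taken from outgoing revisions.
        knowns = set(pairs)                    # drop duplicate (fn, hash) pairs
        hashes = {h for _fn, h in knowns}
        present = store_exists(hashes)         # one batched query to the remote
        return [(fn, h) for fn, h in knowns if not present[h]]
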
1456 def outgoinghook(ui, repo, other, opts, missing):
1450 def outgoinghook(ui, repo, other, opts, missing):
1457 if opts.pop(b'large', None):
1451 if opts.pop(b'large', None):
1458 lfhashes = set()
1452 lfhashes = set()
1459 if ui.debugflag:
1453 if ui.debugflag:
1460 toupload = {}
1454 toupload = {}
1461
1455
1462 def addfunc(fn, lfhash):
1456 def addfunc(fn, lfhash):
1463 if fn not in toupload:
1457 if fn not in toupload:
1464 toupload[fn] = [] # pytype: disable=unsupported-operands
1458 toupload[fn] = [] # pytype: disable=unsupported-operands
1465 toupload[fn].append(lfhash)
1459 toupload[fn].append(lfhash)
1466 lfhashes.add(lfhash)
1460 lfhashes.add(lfhash)
1467
1461
1468 def showhashes(fn):
1462 def showhashes(fn):
1469 for lfhash in sorted(toupload[fn]):
1463 for lfhash in sorted(toupload[fn]):
1470 ui.debug(b' %s\n' % lfhash)
1464 ui.debug(b' %s\n' % lfhash)
1471
1465
1472 else:
1466 else:
1473 toupload = set()
1467 toupload = set()
1474
1468
1475 def addfunc(fn, lfhash):
1469 def addfunc(fn, lfhash):
1476 toupload.add(fn)
1470 toupload.add(fn)
1477 lfhashes.add(lfhash)
1471 lfhashes.add(lfhash)
1478
1472
1479 def showhashes(fn):
1473 def showhashes(fn):
1480 pass
1474 pass
1481
1475
1482 _getoutgoings(repo, other, missing, addfunc)
1476 _getoutgoings(repo, other, missing, addfunc)
1483
1477
1484 if not toupload:
1478 if not toupload:
1485 ui.status(_(b'largefiles: no files to upload\n'))
1479 ui.status(_(b'largefiles: no files to upload\n'))
1486 else:
1480 else:
1487 ui.status(
1481 ui.status(
1488 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1482 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1489 )
1483 )
1490 for file in sorted(toupload):
1484 for file in sorted(toupload):
1491 ui.status(lfutil.splitstandin(file) + b'\n')
1485 ui.status(lfutil.splitstandin(file) + b'\n')
1492 showhashes(file)
1486 showhashes(file)
1493 ui.status(b'\n')
1487 ui.status(b'\n')
1494
1488
1495
1489
1496 @eh.wrapcommand(
1490 @eh.wrapcommand(
1497 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1491 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1498 )
1492 )
1499 def _outgoingcmd(orig, *args, **kwargs):
1493 def _outgoingcmd(orig, *args, **kwargs):
1500 # Nothing to do here other than add the extra help option - the hook above
1494 # Nothing to do here other than add the extra help option - the hook above
1501 # processes it.
1495 # processes it.
1502 return orig(*args, **kwargs)
1496 return orig(*args, **kwargs)
1503
1497
1504
1498
1505 def summaryremotehook(ui, repo, opts, changes):
1499 def summaryremotehook(ui, repo, opts, changes):
1506 largeopt = opts.get(b'large', False)
1500 largeopt = opts.get(b'large', False)
1507 if changes is None:
1501 if changes is None:
1508 if largeopt:
1502 if largeopt:
1509 return (False, True) # only outgoing check is needed
1503 return (False, True) # only outgoing check is needed
1510 else:
1504 else:
1511 return (False, False)
1505 return (False, False)
1512 elif largeopt:
1506 elif largeopt:
1513 url, branch, peer, outgoing = changes[1]
1507 url, branch, peer, outgoing = changes[1]
1514 if peer is None:
1508 if peer is None:
1515 # i18n: column positioning for "hg summary"
1509 # i18n: column positioning for "hg summary"
1516 ui.status(_(b'largefiles: (no remote repo)\n'))
1510 ui.status(_(b'largefiles: (no remote repo)\n'))
1517 return
1511 return
1518
1512
1519 toupload = set()
1513 toupload = set()
1520 lfhashes = set()
1514 lfhashes = set()
1521
1515
1522 def addfunc(fn, lfhash):
1516 def addfunc(fn, lfhash):
1523 toupload.add(fn)
1517 toupload.add(fn)
1524 lfhashes.add(lfhash)
1518 lfhashes.add(lfhash)
1525
1519
1526 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1520 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1527
1521
1528 if not toupload:
1522 if not toupload:
1529 # i18n: column positioning for "hg summary"
1523 # i18n: column positioning for "hg summary"
1530 ui.status(_(b'largefiles: (no files to upload)\n'))
1524 ui.status(_(b'largefiles: (no files to upload)\n'))
1531 else:
1525 else:
1532 # i18n: column positioning for "hg summary"
1526 # i18n: column positioning for "hg summary"
1533 ui.status(
1527 ui.status(
1534 _(b'largefiles: %d entities for %d files to upload\n')
1528 _(b'largefiles: %d entities for %d files to upload\n')
1535 % (len(lfhashes), len(toupload))
1529 % (len(lfhashes), len(toupload))
1536 )
1530 )
1537
1531
1538
1532
1539 @eh.wrapcommand(
1533 @eh.wrapcommand(
1540 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1534 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1541 )
1535 )
1542 def overridesummary(orig, ui, repo, *pats, **opts):
1536 def overridesummary(orig, ui, repo, *pats, **opts):
1543 with lfstatus(repo):
1537 with lfstatus(repo):
1544 orig(ui, repo, *pats, **opts)
1538 orig(ui, repo, *pats, **opts)
1545
1539
1546
1540
1547 @eh.wrapfunction(scmutil, b'addremove')
1541 @eh.wrapfunction(scmutil, b'addremove')
1548 def scmutiladdremove(
1542 def scmutiladdremove(
1549 orig,
1543 orig,
1550 repo,
1544 repo,
1551 matcher,
1545 matcher,
1552 prefix,
1546 prefix,
1553 uipathfn,
1547 uipathfn,
1554 opts=None,
1548 opts=None,
1555 open_tr=None,
1549 open_tr=None,
1556 ):
1550 ):
1557 if opts is None:
1551 if opts is None:
1558 opts = {}
1552 opts = {}
1559 if not lfutil.islfilesrepo(repo):
1553 if not lfutil.islfilesrepo(repo):
1560 return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
1554 return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
1561 # Get the list of missing largefiles so we can remove them
1555 # Get the list of missing largefiles so we can remove them
1562 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1556 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1563 unsure, s, mtime_boundary = lfdirstate.status(
1557 unsure, s, mtime_boundary = lfdirstate.status(
1564 matchmod.always(),
1558 matchmod.always(),
1565 subrepos=[],
1559 subrepos=[],
1566 ignored=False,
1560 ignored=False,
1567 clean=False,
1561 clean=False,
1568 unknown=False,
1562 unknown=False,
1569 )
1563 )
1570
1564
1571 # open the transaction and changing_files context
1565 # open the transaction and changing_files context
1572 if open_tr is not None:
1566 if open_tr is not None:
1573 open_tr()
1567 open_tr()
1574
1568
1575 # Call into the normal remove code, but leave the removal of the standin
1569 # Call into the normal remove code, but leave the removal of the standin
1576 # to the original addremove. Monkey patching here makes sure we don't
1570 # to the original addremove. Monkey patching here makes sure we don't
1577 # remove the standin in the largefiles code, preventing a very confused
1571 # remove the standin in the largefiles code, preventing a very confused
1578 # state later.
1572 # state later.
1579 if s.deleted:
1573 if s.deleted:
1580 m = copy.copy(matcher)
1574 m = copy.copy(matcher)
1581
1575
1582 # The m._files and m._map attributes are not changed to the deleted list
1576 # The m._files and m._map attributes are not changed to the deleted list
1583 # because that affects the m.exact() test, which in turn governs whether
1577 # because that affects the m.exact() test, which in turn governs whether
1584 # or not the file name is printed, and how. Simply limit the original
1578 # or not the file name is printed, and how. Simply limit the original
1585 # matches to those in the deleted status list.
1579 # matches to those in the deleted status list.
1586 matchfn = m.matchfn
1580 matchfn = m.matchfn
1587 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1581 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1588
1582
1589 removelargefiles(
1583 removelargefiles(
1590 repo.ui,
1584 repo.ui,
1591 repo,
1585 repo,
1592 True,
1586 True,
1593 m,
1587 m,
1594 uipathfn,
1588 uipathfn,
1595 opts.get(b'dry_run'),
1589 opts.get(b'dry_run'),
1596 **pycompat.strkwargs(opts)
1590 **pycompat.strkwargs(opts)
1597 )
1591 )
1598 # Call into the normal add code, and any files that *should* be added as
1592 # Call into the normal add code, and any files that *should* be added as
1599 # largefiles will be
1593 # largefiles will be
1600 added, bad = addlargefiles(
1594 added, bad = addlargefiles(
1601 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1595 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1602 )
1596 )
1603 # Now that we've handled largefiles, hand off to the original addremove
1597 # Now that we've handled largefiles, hand off to the original addremove
1604 # function to take care of the rest. Make sure it doesn't do anything with
1598 # function to take care of the rest. Make sure it doesn't do anything with
1605 # largefiles by passing a matcher that will ignore them.
1599 # largefiles by passing a matcher that will ignore them.
1606 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1600 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1607
1601
1608 return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
1602 return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
1609
1603
1610
1604
1611 # Calling purge with --all will cause the largefiles to be deleted.
1605 # Calling purge with --all will cause the largefiles to be deleted.
1612 # Override repo.status to prevent this from happening.
1606 # Override repo.status to prevent this from happening.
1613 @eh.wrapcommand(b'purge')
1607 @eh.wrapcommand(b'purge')
1614 def overridepurge(orig, ui, repo, *dirs, **opts):
1608 def overridepurge(orig, ui, repo, *dirs, **opts):
1615 # XXX Monkey patching a repoview will not work. The assigned attribute will
1609 # XXX Monkey patching a repoview will not work. The assigned attribute will
1616 # be set on the unfiltered repo, but we will only look up attributes in the
1610 # be set on the unfiltered repo, but we will only look up attributes in the
1617 # unfiltered repo if the lookup in the repoview object itself fails. As the
1611 # unfiltered repo if the lookup in the repoview object itself fails. As the
1618 # monkey patched method exists on the repoview class the lookup will not
1612 # monkey patched method exists on the repoview class the lookup will not
1619 # fail. As a result, the original version will shadow the monkey patched
1613 # fail. As a result, the original version will shadow the monkey patched
1620 # one, defeating the monkey patch.
1614 # one, defeating the monkey patch.
1621 #
1615 #
1622 # As a workaround we use an unfiltered repo here. We should do something
1616 # As a workaround we use an unfiltered repo here. We should do something
1623 # cleaner instead.
1617 # cleaner instead.
1624 repo = repo.unfiltered()
1618 repo = repo.unfiltered()
1625 oldstatus = repo.status
1619 oldstatus = repo.status
1626
1620
1627 def overridestatus(
1621 def overridestatus(
1628 node1=b'.',
1622 node1=b'.',
1629 node2=None,
1623 node2=None,
1630 match=None,
1624 match=None,
1631 ignored=False,
1625 ignored=False,
1632 clean=False,
1626 clean=False,
1633 unknown=False,
1627 unknown=False,
1634 listsubrepos=False,
1628 listsubrepos=False,
1635 ):
1629 ):
1636 r = oldstatus(
1630 r = oldstatus(
1637 node1, node2, match, ignored, clean, unknown, listsubrepos
1631 node1, node2, match, ignored, clean, unknown, listsubrepos
1638 )
1632 )
1639 lfdirstate = lfutil.openlfdirstate(ui, repo)
1633 lfdirstate = lfutil.openlfdirstate(ui, repo)
1640 unknown = [
1634 unknown = [
1641 f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
1635 f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
1642 ]
1636 ]
1643 ignored = [
1637 ignored = [
1644 f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
1638 f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
1645 ]
1639 ]
1646 return scmutil.status(
1640 return scmutil.status(
1647 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1641 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1648 )
1642 )
1649
1643
1650 repo.status = overridestatus
1644 repo.status = overridestatus
1651 orig(ui, repo, *dirs, **opts)
1645 orig(ui, repo, *dirs, **opts)
1652 repo.status = oldstatus
1646 repo.status = oldstatus
1653
1647
1654
1648
1655 @eh.wrapcommand(b'rollback')
1649 @eh.wrapcommand(b'rollback')
1656 def overriderollback(orig, ui, repo, **opts):
1650 def overriderollback(orig, ui, repo, **opts):
1657 with repo.wlock():
1651 with repo.wlock():
1658 before = repo.dirstate.parents()
1652 before = repo.dirstate.parents()
1659 orphans = {
1653 orphans = {
1660 f
1654 f
1661 for f in repo.dirstate
1655 for f in repo.dirstate
1662 if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
1656 if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
1663 }
1657 }
1664 result = orig(ui, repo, **opts)
1658 result = orig(ui, repo, **opts)
1665 after = repo.dirstate.parents()
1659 after = repo.dirstate.parents()
1666 if before == after:
1660 if before == after:
1667 return result # no need to restore standins
1661 return result # no need to restore standins
1668
1662
1669 pctx = repo[b'.']
1663 pctx = repo[b'.']
1670 for f in repo.dirstate:
1664 for f in repo.dirstate:
1671 if lfutil.isstandin(f):
1665 if lfutil.isstandin(f):
1672 orphans.discard(f)
1666 orphans.discard(f)
1673 if repo.dirstate.get_entry(f).removed:
1667 if repo.dirstate.get_entry(f).removed:
1674 repo.wvfs.unlinkpath(f, ignoremissing=True)
1668 repo.wvfs.unlinkpath(f, ignoremissing=True)
1675 elif f in pctx:
1669 elif f in pctx:
1676 fctx = pctx[f]
1670 fctx = pctx[f]
1677 repo.wwrite(f, fctx.data(), fctx.flags())
1671 repo.wwrite(f, fctx.data(), fctx.flags())
1678 else:
1672 else:
1679 # content of standin is not so important in 'a',
1673 # content of standin is not so important in 'a',
1680 # 'm' or 'n' (coming from the 2nd parent) cases
1674 # 'm' or 'n' (coming from the 2nd parent) cases
1681 lfutil.writestandin(repo, f, b'', False)
1675 lfutil.writestandin(repo, f, b'', False)
1682 for standin in orphans:
1676 for standin in orphans:
1683 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1677 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1684
1678
1685 return result
1679 return result
1686
1680
1687
1681
1688 @eh.wrapcommand(b'transplant', extension=b'transplant')
1682 @eh.wrapcommand(b'transplant', extension=b'transplant')
1689 def overridetransplant(orig, ui, repo, *revs, **opts):
1683 def overridetransplant(orig, ui, repo, *revs, **opts):
1690 resuming = opts.get('continue')
1684 resuming = opts.get('continue')
1691 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1685 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1692 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1686 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1693 try:
1687 try:
1694 result = orig(ui, repo, *revs, **opts)
1688 result = orig(ui, repo, *revs, **opts)
1695 finally:
1689 finally:
1696 repo._lfstatuswriters.pop()
1690 repo._lfstatuswriters.pop()
1697 repo._lfcommithooks.pop()
1691 repo._lfcommithooks.pop()
1698 return result
1692 return result
1699
1693
1700
1694
1701 @eh.wrapcommand(b'cat')
1695 @eh.wrapcommand(b'cat')
1702 def overridecat(orig, ui, repo, file1, *pats, **opts):
1696 def overridecat(orig, ui, repo, file1, *pats, **opts):
1703 opts = pycompat.byteskwargs(opts)
1697 opts = pycompat.byteskwargs(opts)
1704 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
1698 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
1705 err = 1
1699 err = 1
1706 notbad = set()
1700 notbad = set()
1707 m = scmutil.match(ctx, (file1,) + pats, opts)
1701 m = scmutil.match(ctx, (file1,) + pats, opts)
1708 origmatchfn = m.matchfn
1702 origmatchfn = m.matchfn
1709
1703
1710 def lfmatchfn(f):
1704 def lfmatchfn(f):
1711 if origmatchfn(f):
1705 if origmatchfn(f):
1712 return True
1706 return True
1713 lf = lfutil.splitstandin(f)
1707 lf = lfutil.splitstandin(f)
1714 if lf is None:
1708 if lf is None:
1715 return False
1709 return False
1716 notbad.add(lf)
1710 notbad.add(lf)
1717 return origmatchfn(lf)
1711 return origmatchfn(lf)
1718
1712
1719 m.matchfn = lfmatchfn
1713 m.matchfn = lfmatchfn
1720 origbadfn = m.bad
1714 origbadfn = m.bad
1721
1715
1722 def lfbadfn(f, msg):
1716 def lfbadfn(f, msg):
1723 if not f in notbad:
1717 if not f in notbad:
1724 origbadfn(f, msg)
1718 origbadfn(f, msg)
1725
1719
1726 m.bad = lfbadfn
1720 m.bad = lfbadfn
1727
1721
1728 origvisitdirfn = m.visitdir
1722 origvisitdirfn = m.visitdir
1729
1723
1730 def lfvisitdirfn(dir):
1724 def lfvisitdirfn(dir):
1731 if dir == lfutil.shortname:
1725 if dir == lfutil.shortname:
1732 return True
1726 return True
1733 ret = origvisitdirfn(dir)
1727 ret = origvisitdirfn(dir)
1734 if ret:
1728 if ret:
1735 return ret
1729 return ret
1736 lf = lfutil.splitstandin(dir)
1730 lf = lfutil.splitstandin(dir)
1737 if lf is None:
1731 if lf is None:
1738 return False
1732 return False
1739 return origvisitdirfn(lf)
1733 return origvisitdirfn(lf)
1740
1734
1741 m.visitdir = lfvisitdirfn
1735 m.visitdir = lfvisitdirfn
1742
1736
1743 for f in ctx.walk(m):
1737 for f in ctx.walk(m):
1744 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1738 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1745 lf = lfutil.splitstandin(f)
1739 lf = lfutil.splitstandin(f)
1746 if lf is None or origmatchfn(f):
1740 if lf is None or origmatchfn(f):
1747 # duplicating unreachable code from commands.cat
1741 # duplicating unreachable code from commands.cat
1748 data = ctx[f].data()
1742 data = ctx[f].data()
1749 if opts.get(b'decode'):
1743 if opts.get(b'decode'):
1750 data = repo.wwritedata(f, data)
1744 data = repo.wwritedata(f, data)
1751 fp.write(data)
1745 fp.write(data)
1752 else:
1746 else:
1753 hash = lfutil.readasstandin(ctx[f])
1747 hash = lfutil.readasstandin(ctx[f])
1754 if not lfutil.inusercache(repo.ui, hash):
1748 if not lfutil.inusercache(repo.ui, hash):
1755 store = storefactory.openstore(repo)
1749 store = storefactory.openstore(repo)
1756 success, missing = store.get([(lf, hash)])
1750 success, missing = store.get([(lf, hash)])
1757 if len(success) != 1:
1751 if len(success) != 1:
1758 raise error.Abort(
1752 raise error.Abort(
1759 _(
1753 _(
1760 b'largefile %s is not in cache and could not be '
1754 b'largefile %s is not in cache and could not be '
1761 b'downloaded'
1755 b'downloaded'
1762 )
1756 )
1763 % lf
1757 % lf
1764 )
1758 )
1765 path = lfutil.usercachepath(repo.ui, hash)
1759 path = lfutil.usercachepath(repo.ui, hash)
1766 with open(path, b"rb") as fpin:
1760 with open(path, b"rb") as fpin:
1767 for chunk in util.filechunkiter(fpin):
1761 for chunk in util.filechunkiter(fpin):
1768 fp.write(chunk)
1762 fp.write(chunk)
1769 err = 0
1763 err = 0
1770 return err
1764 return err
1771
1765
1772
1766
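overridecat streams a largefile out of the user cache with util.filechunkiter rather than reading it into memory in one go. A generic sketch of that chunked-copy pattern (the helper name and chunk size are illustrative):

    def copy_in_chunks(src_path, dst_fileobj, chunk_size=128 * 1024):
        # Copy src_path into an already-open writable file object without
        # ever holding more than one chunk in memory.
        with open(src_path, 'rb') as src:
            while True:
                chunk = src.read(chunk_size)
                if not chunk:
                    break
                dst_fileobj.write(chunk)
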
1773 @eh.wrapfunction(merge, b'_update')
1767 @eh.wrapfunction(merge, b'_update')
1774 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1768 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1775 matcher = kwargs.get('matcher', None)
1769 matcher = kwargs.get('matcher', None)
1776 # note if this is a partial update
1770 # note if this is a partial update
1777 partial = matcher and not matcher.always()
1771 partial = matcher and not matcher.always()
1778 with repo.wlock():
1772 with repo.wlock():
1779 # branch | | |
1773 # branch | | |
1780 # merge | force | partial | action
1774 # merge | force | partial | action
1781 # -------+-------+---------+--------------
1775 # -------+-------+---------+--------------
1782 # x | x | x | linear-merge
1776 # x | x | x | linear-merge
1783 # o | x | x | branch-merge
1777 # o | x | x | branch-merge
1784 # x | o | x | overwrite (as clean update)
1778 # x | o | x | overwrite (as clean update)
1785 # o | o | x | force-branch-merge (*1)
1779 # o | o | x | force-branch-merge (*1)
1786 # x | x | o | (*)
1780 # x | x | o | (*)
1787 # o | x | o | (*)
1781 # o | x | o | (*)
1788 # x | o | o | overwrite (as revert)
1782 # x | o | o | overwrite (as revert)
1789 # o | o | o | (*)
1783 # o | o | o | (*)
1790 #
1784 #
1791 # (*) don't care
1785 # (*) don't care
1792 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1786 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1793
1787
1794 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1788 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1795 unsure, s, mtime_boundary = lfdirstate.status(
1789 unsure, s, mtime_boundary = lfdirstate.status(
1796 matchmod.always(),
1790 matchmod.always(),
1797 subrepos=[],
1791 subrepos=[],
1798 ignored=False,
1792 ignored=False,
1799 clean=True,
1793 clean=True,
1800 unknown=False,
1794 unknown=False,
1801 )
1795 )
1802 oldclean = set(s.clean)
1796 oldclean = set(s.clean)
1803 pctx = repo[b'.']
1797 pctx = repo[b'.']
1804 dctx = repo[node]
1798 dctx = repo[node]
1805 for lfile in unsure + s.modified:
1799 for lfile in unsure + s.modified:
1806 lfileabs = repo.wvfs.join(lfile)
1800 lfileabs = repo.wvfs.join(lfile)
1807 if not repo.wvfs.exists(lfileabs):
1801 if not repo.wvfs.exists(lfileabs):
1808 continue
1802 continue
1809 lfhash = lfutil.hashfile(lfileabs)
1803 lfhash = lfutil.hashfile(lfileabs)
1810 standin = lfutil.standin(lfile)
1804 standin = lfutil.standin(lfile)
1811 lfutil.writestandin(
1805 lfutil.writestandin(
1812 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1806 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1813 )
1807 )
1814 if standin in pctx and lfhash == lfutil.readasstandin(
1808 if standin in pctx and lfhash == lfutil.readasstandin(
1815 pctx[standin]
1809 pctx[standin]
1816 ):
1810 ):
1817 oldclean.add(lfile)
1811 oldclean.add(lfile)
1818 for lfile in s.added:
1812 for lfile in s.added:
1819 fstandin = lfutil.standin(lfile)
1813 fstandin = lfutil.standin(lfile)
1820 if fstandin not in dctx:
1814 if fstandin not in dctx:
1821 # in this case, content of standin file is meaningless
1815 # in this case, content of standin file is meaningless
1822 # (in dctx, lfile is unknown, or normal file)
1816 # (in dctx, lfile is unknown, or normal file)
1823 continue
1817 continue
1824 lfutil.updatestandin(repo, lfile, fstandin)
1818 lfutil.updatestandin(repo, lfile, fstandin)
1825 # mark all clean largefiles as dirty, just in case the update gets
1819 # mark all clean largefiles as dirty, just in case the update gets
1826 # interrupted before largefiles and lfdirstate are synchronized
1820 # interrupted before largefiles and lfdirstate are synchronized
1827 for lfile in oldclean:
1821 for lfile in oldclean:
1828 lfdirstate.set_possibly_dirty(lfile)
1822 lfdirstate.set_possibly_dirty(lfile)
1829 lfdirstate.write(repo.currenttransaction())
1823 lfdirstate.write(repo.currenttransaction())
1830
1824
1831 oldstandins = lfutil.getstandinsstate(repo)
1825 oldstandins = lfutil.getstandinsstate(repo)
1832 wc = kwargs.get('wc')
1826 wc = kwargs.get('wc')
1833 if wc and wc.isinmemory():
1827 if wc and wc.isinmemory():
1834 # largefiles is not a good candidate for in-memory merge (large
1828 # largefiles is not a good candidate for in-memory merge (large
1835 # files, custom dirstate, matcher usage).
1829 # files, custom dirstate, matcher usage).
1836 raise error.ProgrammingError(
1830 raise error.ProgrammingError(
1837 b'largefiles is not compatible with in-memory merge'
1831 b'largefiles is not compatible with in-memory merge'
1838 )
1832 )
1839 with lfdirstate.changing_parents(repo):
1833 with lfdirstate.changing_parents(repo):
1840 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1834 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1841
1835
1842 newstandins = lfutil.getstandinsstate(repo)
1836 newstandins = lfutil.getstandinsstate(repo)
1843 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1837 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1844
1838
1845 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1839 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1846 # all the ones that didn't change as clean
1840 # all the ones that didn't change as clean
1847 for lfile in oldclean.difference(filelist):
1841 for lfile in oldclean.difference(filelist):
1848 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1842 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1849 lfdirstate.write(repo.currenttransaction())
1843 lfdirstate.write(repo.currenttransaction())
1850
1844
1851 if branchmerge or force or partial:
1845 if branchmerge or force or partial:
1852 filelist.extend(s.deleted + s.removed)
1846 filelist.extend(s.deleted + s.removed)
1853
1847
1854 lfcommands.updatelfiles(
1848 lfcommands.updatelfiles(
1855 repo.ui, repo, filelist=filelist, normallookup=partial
1849 repo.ui, repo, filelist=filelist, normallookup=partial
1856 )
1850 )
1857
1851
1858 return result
1852 return result
1859
1853
1860
1854
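The branchmerge/force/partial table at the top of mergeupdate can be read as a small pure function. The sketch below is only a restatement of that table and is not part of the extension:

    def update_kind(branchmerge, force, partial):
        # Mirrors the comment table in mergeupdate: 'x' = False, 'o' = True.
        if partial:
            if force and not branchmerge:
                return 'overwrite (as revert)'
            return "don't care"
        if force:
            if branchmerge:
                return 'force-branch-merge (deprecated, internal use)'
            return 'overwrite (as clean update)'
        return 'branch-merge' if branchmerge else 'linear-merge'
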
1861 @eh.wrapfunction(scmutil, b'marktouched')
1855 @eh.wrapfunction(scmutil, b'marktouched')
1862 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1856 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1863 result = orig(repo, files, *args, **kwargs)
1857 result = orig(repo, files, *args, **kwargs)
1864
1858
1865 filelist = []
1859 filelist = []
1866 for f in files:
1860 for f in files:
1867 lf = lfutil.splitstandin(f)
1861 lf = lfutil.splitstandin(f)
1868 if lf is not None:
1862 if lf is not None:
1869 filelist.append(lf)
1863 filelist.append(lf)
1870 if filelist:
1864 if filelist:
1871 lfcommands.updatelfiles(
1865 lfcommands.updatelfiles(
1872 repo.ui,
1866 repo.ui,
1873 repo,
1867 repo,
1874 filelist=filelist,
1868 filelist=filelist,
1875 printmessage=False,
1869 printmessage=False,
1876 normallookup=True,
1870 normallookup=True,
1877 )
1871 )
1878
1872
1879 return result
1873 return result
1880
1874
1881
1875
1882 @eh.wrapfunction(upgrade_actions, b'preservedrequirements')
1876 @eh.wrapfunction(upgrade_actions, b'preservedrequirements')
1883 @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
1877 @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
1884 def upgraderequirements(orig, repo):
1878 def upgraderequirements(orig, repo):
1885 reqs = orig(repo)
1879 reqs = orig(repo)
1886 if b'largefiles' in repo.requirements:
1880 if b'largefiles' in repo.requirements:
1887 reqs.add(b'largefiles')
1881 reqs.add(b'largefiles')
1888 return reqs
1882 return reqs
1889
1883
1890
1884
1891 _lfscheme = b'largefile://'
1885 _lfscheme = b'largefile://'
1892
1886
1893
1887
1894 @eh.wrapfunction(urlmod, b'open')
1888 @eh.wrapfunction(urlmod, b'open')
1895 def openlargefile(orig, ui, url_, data=None, **kwargs):
1889 def openlargefile(orig, ui, url_, data=None, **kwargs):
1896 if url_.startswith(_lfscheme):
1890 if url_.startswith(_lfscheme):
1897 if data:
1891 if data:
1898 msg = b"cannot use data on a 'largefile://' url"
1892 msg = b"cannot use data on a 'largefile://' url"
1899 raise error.ProgrammingError(msg)
1893 raise error.ProgrammingError(msg)
1900 lfid = url_[len(_lfscheme) :]
1894 lfid = url_[len(_lfscheme) :]
1901 return storefactory.getlfile(ui, lfid)
1895 return storefactory.getlfile(ui, lfid)
1902 else:
1896 else:
1903 return orig(ui, url_, data=data, **kwargs)
1897 return orig(ui, url_, data=data, **kwargs)