##// END OF EJS Templates
largefiles: migrate `opts` to native kwargs
Matt Harbison -
r51773:ee393dbf default
parent child Browse files
Show More
@@ -1,675 +1,673 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import binascii
11 import binascii
12 import os
12 import os
13 import shutil
13 import shutil
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16 from mercurial.node import (
16 from mercurial.node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 )
19 )
20
20
21 from mercurial import (
21 from mercurial import (
22 cmdutil,
22 cmdutil,
23 context,
23 context,
24 error,
24 error,
25 exthelper,
25 exthelper,
26 hg,
26 hg,
27 lock,
27 lock,
28 logcmdutil,
28 logcmdutil,
29 match as matchmod,
29 match as matchmod,
30 pycompat,
31 scmutil,
30 scmutil,
32 util,
31 util,
33 )
32 )
34 from mercurial.utils import hashutil
33 from mercurial.utils import hashutil
35
34
36 from ..convert import (
35 from ..convert import (
37 convcmd,
36 convcmd,
38 filemap,
37 filemap,
39 )
38 )
40
39
41 from . import lfutil, storefactory
40 from . import lfutil, storefactory
42
41
# Shorthand used by lfconvert() to release the destination repository locks.
release = lock.release

# -- Commands ----------------------------------------------------------

# Extension helper collecting the command table for this module.
eh = exthelper.exthelper()
48
47
49
48
@eh.command(
    b'lfconvert',
    [
        (
            b's',
            b'size',
            b'',
            _(b'minimum size (MB) for files to be converted as largefiles'),
            b'SIZE',
        ),
        (
            b'',
            b'to-normal',
            False,
            _(b'convert from a largefiles repo to a normal repo'),
        ),
    ],
    _(b'hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True,
)
def lfconvert(ui, src, dest, *pats, **opts):
    """convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all."""

    # opts arrives as native-str keyword arguments (no byteskwargs shim).
    tolfile = not opts['to_normal']
    if tolfile:
        # Size threshold only matters when converting *to* largefiles.
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_(b'initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source. The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (
            rsrc[ctx]
            for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
        )
        revmap = {rsrc.nullid: rdst.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted to.
            # Don't need to lock src because we are just reading from its
            # history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, b'patterns')
            matcher = matchmod.match(rsrc.root, b'', list(pats)) if pats else None

            lfiletohash = {}
            with ui.makeprogress(
                _(b'converting revisions'),
                unit=_(b'revisions'),
                total=rsrc[b'tip'].rev(),
            ) as progress:
                for ctx in ctxs:
                    progress.update(ctx.rev())
                    _lfconvert_addchangeset(
                        rsrc,
                        rdst,
                        ctx,
                        revmap,
                        lfiles,
                        normalfiles,
                        matcher,
                        size,
                        lfiletohash,
                    )

            # Drop the working copy of the largefiles store and any standins
            # that were materialized during conversion.
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    # best-effort cleanup of now-empty directories
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add(b'largefiles')
                scmutil.writereporequirements(rdst)
        else:

            class lfsource(filemap.filemap_source):
                # Convert source that maps the standin directory back onto
                # the repository root and substitutes real largefile data.
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = b'.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (
                        not realname.startswith(lfutil.shortnameslash)
                        or f[0] is None
                    ):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(
                            _(b"missing largefile for '%s' in %s")
                            % (realname, realrev)
                        )
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                # Converter that wraps its source in lfsource above.
                def __init__(self, ui, source, dest, revmapfile, opts):
                    wrapped = lfsource(ui, source)

                    super(converter, self).__init__(
                        ui, wrapped, dest, revmapfile, opts
                    )

            # All largefile blobs must be available locally before converting.
            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_(b"all largefiles must be present locally"))

            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(
                    ui, src, dest, source_type=b'hg', dest_type=b'hg'
                )
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            # XXX is this the right context semantically ?
            with rdst.dirstate.changing_parents(rdst):
                rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
229
227
230
228
def _lfconvert_addchangeset(
    rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
):
    """Replay one source changeset into rdst, turning qualifying files
    into largefile standins and recording the mapping in revmap."""
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.copysource()
                if renamed is None:
                    # the code below assumes renamed to be a boolean or a list
                    # and won't quite work with the value None
                    renamed = False
                renamedlfile = renamed and renamed in lfiles
                islfile |= renamedlfile
                if b'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _(b'renamed/copied largefile %s becomes symlink')
                            % f
                        )
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if b'l' in fctx.flags():
                    renamed = fctx.copysource()
                    if renamed and renamed in lfiles:
                        raise error.Abort(
                            _(b'largefile %s becomes symlink') % f
                        )

                # largefile was modified, update standins
                m = hashutil.sha1(b'')
                m.update(ctx[f].data())
                hash = hex(m.digest())
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = b'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash, executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # Build the memfilectx for f; standins get the recorded hash as
        # content, normal files are forwarded to _getnormalcontext().
        srcfname = lfutil.splitstandin(f)
        if srcfname is None:
            return _getnormalcontext(repo, ctx, f, revmap)

        # if the file isn't in the manifest then it was removed
        # or renamed, return None to indicate this
        try:
            fctx = ctx.filectx(srcfname)
        except error.LookupError:
            return None
        renamed = fctx.copysource()
        if renamed:
            # standin is always a largefile because largefile-ness
            # doesn't change after rename or copy
            renamed = lfutil.standin(renamed)

        return context.memfilectx(
            repo,
            memctx,
            f,
            lfiletohash[srcfname] + b'\n',
            b'l' in fctx.flags(),
            b'x' in fctx.flags(),
            renamed,
        )

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
322
320
323
321
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit a memctx mirroring ctx into rdst and record the new tip
    node for ctx in revmap."""
    mctx = context.memctx(
        rdst,
        parents,
        ctx.description(),
        dstfiles,
        getfilectx,
        ctx.user(),
        ctx.date(),
        ctx.extra(),
    )
    ret = rdst.commitctx(mctx)
    # Ensure any largefiles referenced by the new commit land in the store.
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
339
337
340
338
341 # Generate list of changed files
339 # Generate list of changed files
342 def _getchangedfiles(ctx, parents):
340 def _getchangedfiles(ctx, parents):
343 files = set(ctx.files())
341 files = set(ctx.files())
344 if ctx.repo().nullid not in parents:
342 if ctx.repo().nullid not in parents:
345 mc = ctx.manifest()
343 mc = ctx.manifest()
346 for pctx in ctx.parents():
344 for pctx in ctx.parents():
347 for fn in pctx.manifest().diff(mc):
345 for fn in pctx.manifest().diff(mc):
348 files.add(fn)
346 files.add(fn)
349 return files
347 return files
350
348
351
349
352 # Convert src parents to dst parents
350 # Convert src parents to dst parents
353 def _convertparents(ctx, revmap):
351 def _convertparents(ctx, revmap):
354 parents = []
352 parents = []
355 for p in ctx.parents():
353 for p in ctx.parents():
356 parents.append(revmap[p.node()])
354 parents.append(revmap[p.node()])
357 while len(parents) < 2:
355 while len(parents) < 2:
358 parents.append(ctx.repo().nullid)
356 parents.append(ctx.repo().nullid)
359 return parents
357 return parents
360
358
361
359
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    """Return a memfilectx for non-largefile f, or None if f is absent
    from ctx (removed/renamed). .hgtags content is rewritten through
    revmap so tag targets point at converted nodes."""
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.copysource()

    data = fctx.data()
    if f == b'.hgtags':
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(
        repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
    )
376
374
377
375
378 # Remap tag data using a revision map
376 # Remap tag data using a revision map
379 def _converttags(ui, revmap, data):
377 def _converttags(ui, revmap, data):
380 newdata = []
378 newdata = []
381 for line in data.splitlines():
379 for line in data.splitlines():
382 try:
380 try:
383 id, name = line.split(b' ', 1)
381 id, name = line.split(b' ', 1)
384 except ValueError:
382 except ValueError:
385 ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
383 ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
386 continue
384 continue
387 try:
385 try:
388 newid = bin(id)
386 newid = bin(id)
389 except binascii.Error:
387 except binascii.Error:
390 ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
388 ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
391 continue
389 continue
392 try:
390 try:
393 newdata.append(b'%s %s\n' % (hex(revmap[newid]), name))
391 newdata.append(b'%s %s\n' % (hex(revmap[newid]), name))
394 except KeyError:
392 except KeyError:
395 ui.warn(_(b'no mapping for id %s\n') % id)
393 ui.warn(_(b'no mapping for id %s\n') % id)
396 continue
394 continue
397 return b''.join(newdata)
395 return b''.join(newdata)
398
396
399
397
400 def _islfile(file, ctx, matcher, size):
398 def _islfile(file, ctx, matcher, size):
401 """Return true if file should be considered a largefile, i.e.
399 """Return true if file should be considered a largefile, i.e.
402 matcher matches it or it is larger than size."""
400 matcher matches it or it is larger than size."""
403 # never store special .hg* files as largefiles
401 # never store special .hg* files as largefiles
404 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
402 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
405 return False
403 return False
406 if matcher and matcher(file):
404 if matcher and matcher(file):
407 return True
405 return True
408 try:
406 try:
409 return ctx.filectx(file).size() >= size * 1024 * 1024
407 return ctx.filectx(file).size() >= size * 1024 * 1024
410 except error.LookupError:
408 except error.LookupError:
411 return False
409 return False
412
410
413
411
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
    # Only upload hashes the remote store does not already have.
    retval = store.exists(files)
    files = [h for h in files if not retval[h]]
    ui.debug(b"%d largefiles need to be uploaded\n" % len(files))

    with ui.makeprogress(
        _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
    ) as progress:
        for at, hash in enumerate(files):
            progress.update(at)
            source = lfutil.findfile(rsrc, hash)
            if not source:
                raise error.Abort(
                    _(
                        b'largefile %s missing from store'
                        b' (needs to be uploaded)'
                    )
                    % hash
                )
            # XXX check for errors here
            store.put(source, hash)
445
443
446
444
def verifylfiles(ui, repo, all=False, contents=False):
    """Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository."""
    # Either every revision or just the working directory parent.
    revs = repo.revs(b'all()') if all else [b'.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
460
458
461
459
def cachelfiles(ui, repo, node, filelist=None):
    """cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found."""
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        # restrict to the requested subset
        lfiles = set(lfiles) & set(filelist)

    ctx = repo[node]
    toget = []
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except FileNotFoundError:
            continue  # node must be None and standin wasn't found in wctx
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if not toget:
        return ([], [])

    store = storefactory.openstore(repo)
    return store.get(toget)
489
487
490
488
def downloadlfiles(ui, repo):
    """Cache the largefiles of every revision touching the largefiles
    store directory; return (total cached, total missing) counts."""
    tonode = repo.changelog.node
    totalsuccess = 0
    totalmissing = 0
    for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname):
        success, missing = cachelfiles(ui, repo, tonode(rev))
        totalsuccess += len(success)
        totalmissing += len(missing)
    ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
503
501
504
502
505 def updatelfiles(
503 def updatelfiles(
506 ui, repo, filelist=None, printmessage=None, normallookup=False
504 ui, repo, filelist=None, printmessage=None, normallookup=False
507 ):
505 ):
508 """Update largefiles according to standins in the working directory
506 """Update largefiles according to standins in the working directory
509
507
510 If ``printmessage`` is other than ``None``, it means "print (or
508 If ``printmessage`` is other than ``None``, it means "print (or
511 ignore, for false) message forcibly".
509 ignore, for false) message forcibly".
512 """
510 """
513 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
511 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
514 with repo.wlock():
512 with repo.wlock():
515 lfdirstate = lfutil.openlfdirstate(ui, repo)
513 lfdirstate = lfutil.openlfdirstate(ui, repo)
516 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
514 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
517
515
518 if filelist is not None:
516 if filelist is not None:
519 filelist = set(filelist)
517 filelist = set(filelist)
520 lfiles = [f for f in lfiles if f in filelist]
518 lfiles = [f for f in lfiles if f in filelist]
521
519
522 update = {}
520 update = {}
523 dropped = set()
521 dropped = set()
524 updated, removed = 0, 0
522 updated, removed = 0, 0
525 wvfs = repo.wvfs
523 wvfs = repo.wvfs
526 wctx = repo[None]
524 wctx = repo[None]
527 for lfile in lfiles:
525 for lfile in lfiles:
528 lfileorig = os.path.relpath(
526 lfileorig = os.path.relpath(
529 scmutil.backuppath(ui, repo, lfile), start=repo.root
527 scmutil.backuppath(ui, repo, lfile), start=repo.root
530 )
528 )
531 standin = lfutil.standin(lfile)
529 standin = lfutil.standin(lfile)
532 standinorig = os.path.relpath(
530 standinorig = os.path.relpath(
533 scmutil.backuppath(ui, repo, standin), start=repo.root
531 scmutil.backuppath(ui, repo, standin), start=repo.root
534 )
532 )
535 if wvfs.exists(standin):
533 if wvfs.exists(standin):
536 if wvfs.exists(standinorig) and wvfs.exists(lfile):
534 if wvfs.exists(standinorig) and wvfs.exists(lfile):
537 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
535 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
538 wvfs.unlinkpath(standinorig)
536 wvfs.unlinkpath(standinorig)
539 expecthash = lfutil.readasstandin(wctx[standin])
537 expecthash = lfutil.readasstandin(wctx[standin])
540 if expecthash != b'':
538 if expecthash != b'':
541 if lfile not in wctx: # not switched to normal file
539 if lfile not in wctx: # not switched to normal file
542 if repo.dirstate.get_entry(standin).any_tracked:
540 if repo.dirstate.get_entry(standin).any_tracked:
543 wvfs.unlinkpath(lfile, ignoremissing=True)
541 wvfs.unlinkpath(lfile, ignoremissing=True)
544 else:
542 else:
545 dropped.add(lfile)
543 dropped.add(lfile)
546
544
547 # allocate an entry in largefiles dirstate to prevent
545 # allocate an entry in largefiles dirstate to prevent
548 # lfilesrepo.status() from reporting missing files as
546 # lfilesrepo.status() from reporting missing files as
549 # removed.
547 # removed.
550 lfdirstate.hacky_extension_update_file(
548 lfdirstate.hacky_extension_update_file(
551 lfile,
549 lfile,
552 p1_tracked=True,
550 p1_tracked=True,
553 wc_tracked=True,
551 wc_tracked=True,
554 possibly_dirty=True,
552 possibly_dirty=True,
555 )
553 )
556 update[lfile] = expecthash
554 update[lfile] = expecthash
557 else:
555 else:
558 # Remove lfiles for which the standin is deleted, unless the
556 # Remove lfiles for which the standin is deleted, unless the
559 # lfile is added to the repository again. This happens when a
557 # lfile is added to the repository again. This happens when a
560 # largefile is converted back to a normal file: the standin
558 # largefile is converted back to a normal file: the standin
561 # disappears, but a new (normal) file appears as the lfile.
559 # disappears, but a new (normal) file appears as the lfile.
562 if (
560 if (
563 wvfs.exists(lfile)
561 wvfs.exists(lfile)
564 and repo.dirstate.normalize(lfile) not in wctx
562 and repo.dirstate.normalize(lfile) not in wctx
565 ):
563 ):
566 wvfs.unlinkpath(lfile)
564 wvfs.unlinkpath(lfile)
567 removed += 1
565 removed += 1
568
566
569 # largefile processing might be slow and be interrupted - be prepared
567 # largefile processing might be slow and be interrupted - be prepared
570 lfdirstate.write(repo.currenttransaction())
568 lfdirstate.write(repo.currenttransaction())
571
569
572 if lfiles:
570 if lfiles:
573 lfiles = [f for f in lfiles if f not in dropped]
571 lfiles = [f for f in lfiles if f not in dropped]
574
572
575 for f in dropped:
573 for f in dropped:
576 repo.wvfs.unlinkpath(lfutil.standin(f))
574 repo.wvfs.unlinkpath(lfutil.standin(f))
577 # This needs to happen for dropped files, otherwise they stay in
575 # This needs to happen for dropped files, otherwise they stay in
578 # the M state.
576 # the M state.
579 lfdirstate._map.reset_state(f)
577 lfdirstate._map.reset_state(f)
580
578
581 statuswriter(_(b'getting changed largefiles\n'))
579 statuswriter(_(b'getting changed largefiles\n'))
582 cachelfiles(ui, repo, None, lfiles)
580 cachelfiles(ui, repo, None, lfiles)
583
581
584 for lfile in lfiles:
582 for lfile in lfiles:
585 update1 = 0
583 update1 = 0
586
584
587 expecthash = update.get(lfile)
585 expecthash = update.get(lfile)
588 if expecthash:
586 if expecthash:
589 if not lfutil.copyfromcache(repo, expecthash, lfile):
587 if not lfutil.copyfromcache(repo, expecthash, lfile):
590 # failed ... but already removed and set to normallookup
588 # failed ... but already removed and set to normallookup
591 continue
589 continue
592 # Synchronize largefile dirstate to the last modified
590 # Synchronize largefile dirstate to the last modified
593 # time of the file
591 # time of the file
594 lfdirstate.hacky_extension_update_file(
592 lfdirstate.hacky_extension_update_file(
595 lfile,
593 lfile,
596 p1_tracked=True,
594 p1_tracked=True,
597 wc_tracked=True,
595 wc_tracked=True,
598 )
596 )
599 update1 = 1
597 update1 = 1
600
598
601 # copy the exec mode of largefile standin from the repository's
599 # copy the exec mode of largefile standin from the repository's
602 # dirstate to its state in the lfdirstate.
600 # dirstate to its state in the lfdirstate.
603 standin = lfutil.standin(lfile)
601 standin = lfutil.standin(lfile)
604 if wvfs.exists(standin):
602 if wvfs.exists(standin):
605 # exec is decided by the users permissions using mask 0o100
603 # exec is decided by the users permissions using mask 0o100
606 standinexec = wvfs.stat(standin).st_mode & 0o100
604 standinexec = wvfs.stat(standin).st_mode & 0o100
607 st = wvfs.stat(lfile)
605 st = wvfs.stat(lfile)
608 mode = st.st_mode
606 mode = st.st_mode
609 if standinexec != mode & 0o100:
607 if standinexec != mode & 0o100:
610 # first remove all X bits, then shift all R bits to X
608 # first remove all X bits, then shift all R bits to X
611 mode &= ~0o111
609 mode &= ~0o111
612 if standinexec:
610 if standinexec:
613 mode |= (mode >> 2) & 0o111 & ~util.umask
611 mode |= (mode >> 2) & 0o111 & ~util.umask
614 wvfs.chmod(lfile, mode)
612 wvfs.chmod(lfile, mode)
615 update1 = 1
613 update1 = 1
616
614
617 updated += update1
615 updated += update1
618
616
619 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
617 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
620
618
621 lfdirstate.write(repo.currenttransaction())
619 lfdirstate.write(repo.currenttransaction())
622 if lfiles:
620 if lfiles:
623 statuswriter(
621 statuswriter(
624 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
622 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
625 )
623 )
626
624
627
625
@eh.command(
    b'lfpull',
    [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
    + cmdutil.remoteopts,
    _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
)
def lfpull(ui, repo, source=b"default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Remember where to pull from so the store machinery can find it.
    repo.lfpullsource = source

    revspec = opts.get('rev', [])
    if not revspec:
        raise error.Abort(_(b'no revisions specified'))

    total = 0
    for rev in logcmdutil.revrange(repo, revspec):
        ui.note(_(b'pulling largefiles for revision %d\n') % rev)
        cached, _missing = cachelfiles(ui, repo, rev)
        total += len(cached)
    ui.status(_(b"%d largefiles cached\n") % total)
668
666
669
667
@eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    """Hash FILE, upload it to the largefiles store, and print the hash."""
    digest = lfutil.hashfile(filepath)
    store = storefactory.openstore(repo)
    store.put(filepath, digest)
    ui.write(b'%s\n' % digest)
    return 0
@@ -1,1925 +1,1924 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import contextlib
11 import contextlib
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18
18
19 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
20
20
21 from mercurial import (
21 from mercurial import (
22 archival,
22 archival,
23 cmdutil,
23 cmdutil,
24 copies as copiesmod,
24 copies as copiesmod,
25 dirstate,
25 dirstate,
26 error,
26 error,
27 exchange,
27 exchange,
28 extensions,
28 extensions,
29 exthelper,
29 exthelper,
30 filemerge,
30 filemerge,
31 hg,
31 hg,
32 logcmdutil,
32 logcmdutil,
33 match as matchmod,
33 match as matchmod,
34 merge,
34 merge,
35 mergestate as mergestatemod,
35 mergestate as mergestatemod,
36 pathutil,
36 pathutil,
37 pycompat,
37 pycompat,
38 scmutil,
38 scmutil,
39 smartset,
39 smartset,
40 subrepo,
40 subrepo,
41 url as urlmod,
41 url as urlmod,
42 util,
42 util,
43 )
43 )
44
44
45 from mercurial.upgrade_utils import (
45 from mercurial.upgrade_utils import (
46 actions as upgrade_actions,
46 actions as upgrade_actions,
47 )
47 )
48
48
49 from . import (
49 from . import (
50 lfcommands,
50 lfcommands,
51 lfutil,
51 lfutil,
52 storefactory,
52 storefactory,
53 )
53 )
54
54
# Short local aliases for the merge actions used throughout this module.
ACTION_ADD = mergestatemod.ACTION_ADD
ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
ACTION_GET = mergestatemod.ACTION_GET
ACTION_KEEP = mergestatemod.ACTION_KEEP
ACTION_REMOVE = mergestatemod.ACTION_REMOVE

# Extension helper used to register the command and function wrappers below.
eh = exthelper.exthelper()

# Context manager that turns on largefiles-aware status reporting.
lfstatus = lfutil.lfstatus

# Merge action marking a largefile as removed ('lfmr').
MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')
66
66
67 # -- Utility functions: commonly/repeatedly needed functionality ---------------
67 # -- Utility functions: commonly/repeatedly needed functionality ---------------
68
68
69
69
def composelargefilematcher(match, manifest):
    """Return a copy of ``match`` restricted to largefiles only.

    A file counts as a largefile when its standin is present in
    ``manifest``.
    """
    narrowed = copy.copy(match)

    def islfile(f):
        return lfutil.standin(f) in manifest

    narrowed._files = [f for f in narrowed._files if islfile(f)]
    narrowed._fileset = set(narrowed._files)
    narrowed.always = lambda: False
    basematchfn = narrowed.matchfn
    narrowed.matchfn = lambda f: islfile(f) and basematchfn(f)
    return narrowed
81
81
82
82
def composenormalfilematcher(match, manifest, exclude=None):
    """Return a copy of ``match`` that skips largefiles and standins.

    ``exclude`` optionally lists additional file names to leave out.
    """
    excluded = set() if exclude is None else set(exclude)

    narrowed = copy.copy(match)

    def isnormal(f):
        # Same test order as a standin check: name shape first, then
        # manifest membership, then the explicit exclusion list.
        return not (
            lfutil.isstandin(f)
            or lfutil.standin(f) in manifest
            or f in excluded
        )

    narrowed._files = [f for f in narrowed._files if isnormal(f)]
    narrowed._fileset = set(narrowed._files)
    narrowed.always = lambda: False
    basematchfn = narrowed.matchfn
    narrowed.matchfn = lambda f: isnormal(f) and basematchfn(f)
    return narrowed
98
98
99
99
def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    """Add files matched by ``matcher`` as largefiles.

    A matched file becomes a largefile when ``--large`` was given, when its
    size is at or above the configured minimum (``--lfsize`` /
    ``largefiles.minsize``), or when it matches one of the
    ``largefiles.patterns`` globs.  Returns a ``(added, bad)`` pair of
    largefile name lists.
    """
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    # Optional matcher built from the largefiles.patterns config option.
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (
                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
            )
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # Write an empty-hash standin now; the real content hash is
                # filled in at commit time.
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                lfdirstate.set_tracked(f)
            lfdirstate.write(repo.currenttransaction())
            # Map rejected standins back to the largefile names the caller
            # actually asked about.
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

    added = [f for f in lfnames if f not in bad]
    return added, bad
170
170
171
171
def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    """Remove largefiles matched by ``matcher``.

    With ``--after`` only files already deleted from the working directory
    are dropped; otherwise clean files are removed as well.  Returns 1 when
    any matched file could not be removed, else 0 (mirroring ``hg remove``).
    """
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    # Restrict each status list to real largefiles (standin in manifest).
    modified, added, deleted, clean = [
        [f for f in list if lfutil.standin(f) in manifest]
        for list in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        # Emit one warning per file; the result doubles as an exit status.
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        # From here on operate on the standins, not the largefile names.
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfdirstate.set_untracked(lfutil.splitstandin(f))

        lfdirstate.write(repo.currenttransaction())

    return result
242
242
243
243
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
@eh.wrapfunction(webcommands, 'decodepath')
def decodepath(orig, path):
    """Map a standin path back to its largefile name; pass others through."""
    largefile = lfutil.splitstandin(path)
    return largefile if largefile else path
249
249
250
250
251 # -- Wrappers: modify existing commands --------------------------------
251 # -- Wrappers: modify existing commands --------------------------------
252
252
253
253
@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    """Reject the contradictory --normal/--large combination, then defer
    to the wrapped add command."""
    wantsnormal = opts.get('normal')
    wantslarge = opts.get('large')
    if wantsnormal and wantslarge:
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
274
274
275
275
@eh.wrapfunction(cmdutil, 'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    """Add matched largefiles first, then hand the remaining files to the
    wrapped ``cmdutil.add``; returns the combined list of failed files."""
    # The --normal flag short circuits this override.
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    # Exclude files just added as largefiles from the normal add.
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    failed = orig(
        ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts
    )

    failed.extend(lbad)
    return failed
290
290
291
291
@eh.wrapfunction(cmdutil, 'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    """Remove normal files via the wrapped ``cmdutil.remove`` and matched
    largefiles via ``removelargefiles``; nonzero when either step failed."""
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    normalresult = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    largeresult = removelargefiles(
        ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
    )
    return largeresult or normalresult
314
314
315
315
@eh.wrapfunction(dirstate.dirstate, '_changing')
@contextlib.contextmanager
def _changing(orig, self, repo, change_type):
    """Wrap ``dirstate._changing`` so the largefiles dirstate enters the
    same changing context as the main dirstate."""
    # Remember what was attached before so nesting restores it correctly.
    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
    try:
        # _large_file_dirstate is set on the lfdirstate itself; in that
        # case do not open yet another sub-dirstate.
        lfd = getattr(self, '_large_file_dirstate', False)
        if sub_dirstate is None and not lfd:
            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
            self._sub_dirstate = sub_dirstate
        if not lfd:
            assert self._sub_dirstate is not None
        with orig(self, repo, change_type):
            if sub_dirstate is None:
                yield
            else:
                # Enter the sub-dirstate's changing context in lockstep.
                with sub_dirstate._changing(repo, change_type):
                    yield
    finally:
        self._sub_dirstate = pre
335
335
336
336
@eh.wrapfunction(dirstate.dirstate, 'running_status')
@contextlib.contextmanager
def running_status(orig, self, repo):
    """Wrap ``dirstate.running_status`` so the largefiles dirstate enters
    the same running-status context as the main dirstate."""
    # Remember what was attached before so nesting restores it correctly.
    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
    try:
        # _large_file_dirstate is set on the lfdirstate itself; in that
        # case do not open yet another sub-dirstate.
        lfd = getattr(self, '_large_file_dirstate', False)
        if sub_dirstate is None and not lfd:
            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
            self._sub_dirstate = sub_dirstate
        if not lfd:
            assert self._sub_dirstate is not None
        with orig(self, repo):
            if sub_dirstate is None:
                yield
            else:
                # Enter the sub-dirstate's context in lockstep.
                with sub_dirstate.running_status(repo):
                    yield
    finally:
        self._sub_dirstate = pre
356
356
357
357
@eh.wrapfunction(subrepo.hgsubrepo, 'status')
def overridestatusfn(orig, repo, rev2, **opts):
    """Run the wrapped subrepo status with largefiles reporting enabled."""
    with lfstatus(repo._repo):
        result = orig(repo, rev2, **opts)
    return result
362
362
363
363
@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    """Run the wrapped ``hg status`` with largefiles reporting enabled."""
    with lfstatus(repo):
        result = orig(ui, repo, *pats, **opts)
    return result
368
368
369
369
@eh.wrapfunction(subrepo.hgsubrepo, 'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    """Check subrepo dirtiness with largefiles reporting enabled."""
    with lfstatus(repo._repo):
        result = orig(repo, ignoreupdate=ignoreupdate, missing=missing)
    return result
374
374
375
375
376 @eh.wrapcommand(b'log')
376 @eh.wrapcommand(b'log')
377 def overridelog(orig, ui, repo, *pats, **opts):
377 def overridelog(orig, ui, repo, *pats, **opts):
378 def overridematchandpats(
378 def overridematchandpats(
379 orig,
379 orig,
380 ctx,
380 ctx,
381 pats=(),
381 pats=(),
382 opts=None,
382 opts=None,
383 globbed=False,
383 globbed=False,
384 default=b'relpath',
384 default=b'relpath',
385 badfn=None,
385 badfn=None,
386 ):
386 ):
387 """Matcher that merges root directory with .hglf, suitable for log.
387 """Matcher that merges root directory with .hglf, suitable for log.
388 It is still possible to match .hglf directly.
388 It is still possible to match .hglf directly.
389 For any listed files run log on the standin too.
389 For any listed files run log on the standin too.
390 matchfn tries both the given filename and with .hglf stripped.
390 matchfn tries both the given filename and with .hglf stripped.
391 """
391 """
392 if opts is None:
392 if opts is None:
393 opts = {}
393 opts = {}
394 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
394 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
395 m, p = copy.copy(matchandpats)
395 m, p = copy.copy(matchandpats)
396
396
397 if m.always():
397 if m.always():
398 # We want to match everything anyway, so there's no benefit trying
398 # We want to match everything anyway, so there's no benefit trying
399 # to add standins.
399 # to add standins.
400 return matchandpats
400 return matchandpats
401
401
402 pats = set(p)
402 pats = set(p)
403
403
404 def fixpats(pat, tostandin=lfutil.standin):
404 def fixpats(pat, tostandin=lfutil.standin):
405 if pat.startswith(b'set:'):
405 if pat.startswith(b'set:'):
406 return pat
406 return pat
407
407
408 kindpat = matchmod._patsplit(pat, None)
408 kindpat = matchmod._patsplit(pat, None)
409
409
410 if kindpat[0] is not None:
410 if kindpat[0] is not None:
411 return kindpat[0] + b':' + tostandin(kindpat[1])
411 return kindpat[0] + b':' + tostandin(kindpat[1])
412 return tostandin(kindpat[1])
412 return tostandin(kindpat[1])
413
413
414 cwd = repo.getcwd()
414 cwd = repo.getcwd()
415 if cwd:
415 if cwd:
416 hglf = lfutil.shortname
416 hglf = lfutil.shortname
417 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
417 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
418
418
419 def tostandin(f):
419 def tostandin(f):
420 # The file may already be a standin, so truncate the back
420 # The file may already be a standin, so truncate the back
421 # prefix and test before mangling it. This avoids turning
421 # prefix and test before mangling it. This avoids turning
422 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
422 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
423 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
423 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
424 return f
424 return f
425
425
426 # An absolute path is from outside the repo, so truncate the
426 # An absolute path is from outside the repo, so truncate the
427 # path to the root before building the standin. Otherwise cwd
427 # path to the root before building the standin. Otherwise cwd
428 # is somewhere in the repo, relative to root, and needs to be
428 # is somewhere in the repo, relative to root, and needs to be
429 # prepended before building the standin.
429 # prepended before building the standin.
430 if os.path.isabs(cwd):
430 if os.path.isabs(cwd):
431 f = f[len(back) :]
431 f = f[len(back) :]
432 else:
432 else:
433 f = cwd + b'/' + f
433 f = cwd + b'/' + f
434 return back + lfutil.standin(f)
434 return back + lfutil.standin(f)
435
435
436 else:
436 else:
437
437
438 def tostandin(f):
438 def tostandin(f):
439 if lfutil.isstandin(f):
439 if lfutil.isstandin(f):
440 return f
440 return f
441 return lfutil.standin(f)
441 return lfutil.standin(f)
442
442
443 pats.update(fixpats(f, tostandin) for f in p)
443 pats.update(fixpats(f, tostandin) for f in p)
444
444
445 for i in range(0, len(m._files)):
445 for i in range(0, len(m._files)):
446 # Don't add '.hglf' to m.files, since that is already covered by '.'
446 # Don't add '.hglf' to m.files, since that is already covered by '.'
447 if m._files[i] == b'.':
447 if m._files[i] == b'.':
448 continue
448 continue
449 standin = lfutil.standin(m._files[i])
449 standin = lfutil.standin(m._files[i])
450 # If the "standin" is a directory, append instead of replace to
450 # If the "standin" is a directory, append instead of replace to
451 # support naming a directory on the command line with only
451 # support naming a directory on the command line with only
452 # largefiles. The original directory is kept to support normal
452 # largefiles. The original directory is kept to support normal
453 # files.
453 # files.
454 if standin in ctx:
454 if standin in ctx:
455 m._files[i] = standin
455 m._files[i] = standin
456 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
456 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
457 m._files.append(standin)
457 m._files.append(standin)
458
458
459 m._fileset = set(m._files)
459 m._fileset = set(m._files)
460 m.always = lambda: False
460 m.always = lambda: False
461 origmatchfn = m.matchfn
461 origmatchfn = m.matchfn
462
462
463 def lfmatchfn(f):
463 def lfmatchfn(f):
464 lf = lfutil.splitstandin(f)
464 lf = lfutil.splitstandin(f)
465 if lf is not None and origmatchfn(lf):
465 if lf is not None and origmatchfn(lf):
466 return True
466 return True
467 r = origmatchfn(f)
467 r = origmatchfn(f)
468 return r
468 return r
469
469
470 m.matchfn = lfmatchfn
470 m.matchfn = lfmatchfn
471
471
472 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
472 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
473 return m, pats
473 return m, pats
474
474
475 # For hg log --patch, the match object is used in two different senses:
475 # For hg log --patch, the match object is used in two different senses:
476 # (1) to determine what revisions should be printed out, and
476 # (1) to determine what revisions should be printed out, and
477 # (2) to determine what files to print out diffs for.
477 # (2) to determine what files to print out diffs for.
478 # The magic matchandpats override should be used for case (1) but not for
478 # The magic matchandpats override should be used for case (1) but not for
479 # case (2).
479 # case (2).
480 oldmatchandpats = scmutil.matchandpats
480 oldmatchandpats = scmutil.matchandpats
481
481
482 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
482 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
483 wctx = repo[None]
483 wctx = repo[None]
484 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
484 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
485 return lambda ctx: match
485 return lambda ctx: match
486
486
487 wrappedmatchandpats = extensions.wrappedfunction(
487 wrappedmatchandpats = extensions.wrappedfunction(
488 scmutil, 'matchandpats', overridematchandpats
488 scmutil, 'matchandpats', overridematchandpats
489 )
489 )
490 wrappedmakefilematcher = extensions.wrappedfunction(
490 wrappedmakefilematcher = extensions.wrappedfunction(
491 logcmdutil, '_makenofollowfilematcher', overridemakefilematcher
491 logcmdutil, '_makenofollowfilematcher', overridemakefilematcher
492 )
492 )
493 with wrappedmatchandpats, wrappedmakefilematcher:
493 with wrappedmatchandpats, wrappedmakefilematcher:
494 return orig(ui, repo, *pats, **opts)
494 return orig(ui, repo, *pats, **opts)
495
495
496
496
@eh.wrapcommand(
    b'verify',
    opts=[
        (
            b'',
            b'large',
            None,
            _(b'verify that all largefiles in current revision exists'),
        ),
        (
            b'',
            b'lfa',
            None,
            _(b'verify largefiles in all revisions, not just current'),
        ),
        (
            b'',
            b'lfc',
            None,
            _(b'verify local largefile contents, not just existence'),
        ),
    ],
)
def overrideverify(orig, ui, repo, *pats, **opts):
    """Wrap ``hg verify`` to optionally verify largefiles as well.

    The largefiles-specific flags are stripped from the native-keyed
    ``opts`` before delegating, so the wrapped command never sees them.
    Largefile verification runs after the normal verify and its failure
    is folded into the exit status.
    """
    # These flags belong to this override only; pop them so they are not
    # forwarded to the wrapped verify command.
    check_current = opts.pop('large', False)
    check_all_revs = opts.pop('lfa', False)
    check_contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if check_current or check_all_revs or check_contents:
        result = result or lfcommands.verifylfiles(
            ui, repo, check_all_revs, check_contents
        )
    return result
529
529
530
530
@eh.wrapcommand(
    b'debugstate',
    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
)
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Wrap ``hg debugstate`` so ``--large`` dumps the largefiles dirstate."""
    if not opts.pop('large', False):
        orig(ui, repo, *pats, **opts)
        return

    # Hand the wrapped command a minimal repo-like shim whose dirstate is
    # the largefiles one, so the normal dirstate is not consulted.
    class fakerepo:
        dirstate = lfutil.openlfdirstate(ui, repo)

    orig(ui, fakerepo, *pats, **opts)
545
545
546
546
547 # Before starting the manifest merge, merge.updates will call
547 # Before starting the manifest merge, merge.updates will call
548 # _checkunknownfile to check if there are any files in the merged-in
548 # _checkunknownfile to check if there are any files in the merged-in
549 # changeset that collide with unknown files in the working copy.
549 # changeset that collide with unknown files in the working copy.
550 #
550 #
551 # The largefiles are seen as unknown, so this prevents us from merging
551 # The largefiles are seen as unknown, so this prevents us from merging
552 # in a file 'foo' if we already have a largefile with the same name.
552 # in a file 'foo' if we already have a largefile with the same name.
553 #
553 #
554 # The overridden function filters the unknown files by removing any
554 # The overridden function filters the unknown files by removing any
555 # largefiles. This makes the merge proceed and we can then handle this
555 # largefiles. This makes the merge proceed and we can then handle this
556 # case further in the overridden calculateupdates function below.
556 # case further in the overridden calculateupdates function below.
@eh.wrapfunction(merge, '_checkunknownfile')
def overridecheckunknownfile(
    origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
):
    """Suppress the unknown-file collision check for tracked largefiles.

    A largefile in the working copy looks like an unknown file to the
    merge machinery; when its standin is present in ``wctx`` the apparent
    collision is expected, so report "no conflict" instead of delegating.
    """
    standin = lfutil.standin(dirstate.normalize(f))
    if standin in wctx:
        return False
    return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)
564
564
565
565
566 # The manifest merge handles conflicts on the manifest level. We want
566 # The manifest merge handles conflicts on the manifest level. We want
567 # to handle changes in largefile-ness of files at this level too.
567 # to handle changes in largefile-ness of files at this level too.
568 #
568 #
569 # The strategy is to run the original calculateupdates and then process
569 # The strategy is to run the original calculateupdates and then process
570 # the action list it outputs. There are two cases we need to deal with:
570 # the action list it outputs. There are two cases we need to deal with:
571 #
571 #
572 # 1. Normal file in p1, largefile in p2. Here the largefile is
572 # 1. Normal file in p1, largefile in p2. Here the largefile is
573 # detected via its standin file, which will enter the working copy
573 # detected via its standin file, which will enter the working copy
574 # with a "get" action. It is not "merge" since the standin is all
574 # with a "get" action. It is not "merge" since the standin is all
575 # Mercurial is concerned with at this level -- the link to the
575 # Mercurial is concerned with at this level -- the link to the
576 # existing normal file is not relevant here.
576 # existing normal file is not relevant here.
577 #
577 #
578 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
578 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
579 # since the largefile will be present in the working copy and
579 # since the largefile will be present in the working copy and
580 # different from the normal file in p2. Mercurial therefore
580 # different from the normal file in p2. Mercurial therefore
581 # triggers a merge action.
581 # triggers a merge action.
582 #
582 #
583 # In both cases, we prompt the user and emit new actions to either
583 # In both cases, we prompt the user and emit new actions to either
584 # remove the standin (if the normal file was kept) or to remove the
584 # remove the standin (if the normal file was kept) or to remove the
585 # normal file and get the standin (if the largefile was kept). The
585 # normal file and get the standin (if the largefile was kept). The
586 # default prompt answer is to use the largefile version since it was
586 # default prompt answer is to use the largefile version since it was
587 # presumably changed on purpose.
587 # presumably changed on purpose.
588 #
588 #
589 # Finally, the merge.applyupdates function will then take care of
589 # Finally, the merge.applyupdates function will then take care of
590 # writing the files into the working copy and lfcommands.updatelfiles
590 # writing the files into the working copy and lfcommands.updatelfiles
591 # will update the largefiles.
591 # will update the largefiles.
@eh.wrapfunction(merge, 'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    """Post-process merge actions to resolve largefile/normal-file flips.

    Runs the wrapped ``calculateupdates`` and then rewrites the resulting
    action list for every file that is a largefile on one side of the
    merge and a normal file on the other, prompting the user to pick one
    representation (defaulting to the largefile).  See the preceding
    module comment for the two cases handled.  Returns the (possibly
    modified) merge result.
    """
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        # A plain overwrite (e.g. clean update with --clean) needs no
        # conflict mediation.
        return mresult

    # Collect every file that is involved as a largefile on either side:
    # standins map back to their largefile name, and plain names whose
    # standin exists in p1 are included as-is.
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        # Pending actions for the largefile and its standin, if any.
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))

        if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
            if sm == ACTION_DELETED_CHANGED:
                # Rewrite the delete/changed args into plain "get" args.
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(
                    lfile, ACTION_REMOVE, None, b'replaced by standin'
                )
                mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin,
                        ACTION_KEEP,
                        None,
                        b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin,
                        ACTION_REMOVE,
                        None,
                        b'replaced by non-standin',
                    )
        if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
            if lm == ACTION_DELETED_CHANGED:
                # Rewrite the delete/changed args into plain "get" args.
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile,
                        ACTION_KEEP,
                        None,
                        b'replaced by standin',
                    )
                    mresult.addfile(
                        standin, ACTION_KEEP, None, b'replaces standin'
                    )
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
                mresult.addfile(
                    standin,
                    ACTION_REMOVE,
                    None,
                    b'replaced by non-standin',
                )

    return mresult
701
701
702
702
@eh.wrapfunction(mergestatemod, 'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    """Sync dirstates for largefile 'mark removed' actions, then delegate.

    Each file slated for MERGE_ACTION_LARGEFILE_MARK_REMOVED is recorded
    as removed from the working copy in the normal dirstate while staying
    tracked in the largefiles dirstate, before the wrapped recordupdates
    processes the remaining actions.
    """
    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for entry in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
            lfile = entry[0]
            # Must run before 'orig' so this removal precedes every other
            # recorded action.
            repo.dirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
            # Keep lfile from being synclfdirstate'd as a normal file.
            lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)

    return orig(repo, actions, branchmerge, getfiledata)
715
715
716
716
717 # Override filemerge to prompt the user about how they wish to merge
717 # Override filemerge to prompt the user about how they wish to merge
718 # largefiles. This will handle identical edits without prompting the user.
718 # largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, 'filemerge')
def overridefilemerge(
    origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    """Merge two standins by comparing hashes, prompting only on conflict.

    Non-standins (or merges where either side is absent) fall through to
    the wrapped filemerge.  Identical edits resolve silently; a real
    divergence asks the user to keep local or take other.  Returns the
    ``(exit code, unresolved)`` pair ``(0, False)`` for standins.
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)

    # Standins hold the largefile hash, so comparing file contents reduces
    # to comparing (case-normalized) hashes.
    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()

    takeother = False
    if ohash != ahash and ohash != dhash:
        if dhash == ahash:
            # Only the other side changed: take it without prompting.
            takeother = True
        else:
            # Both sides changed to different contents: ask the user.
            prompt = _(
                b'largefile %s has a merge conflict\nancestor was %s\n'
                b'you can keep (l)ocal %s or take (o)ther %s.\n'
                b'what do you want to do?'
                b'$$ &Local $$ &Other'
            ) % (lfutil.splitstandin(orig), ahash, dhash, ohash)
            takeother = repo.ui.promptchoice(prompt, 0) == 1
    if takeother:
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0, False
749
749
750
750
@eh.wrapfunction(copiesmod, 'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Translate standin paths back to largefile names in copy tracing.

    Delegates to the wrapped ``pathcopies`` and rewrites every key and
    value so that standins report as their largefile name; paths that are
    not standins pass through unchanged.
    """
    copies = orig(ctx1, ctx2, match=match)
    return {
        lfutil.splitstandin(dst) or dst: lfutil.splitstandin(src) or src
        for dst, src in copies.items()
    }
760
760
761
761
762 # Copy first changes the matchers to match standins instead of
762 # Copy first changes the matchers to match standins instead of
763 # largefiles. Then it overrides util.copyfile in that function it
763 # largefiles. Then it overrides util.copyfile in that function it
764 # checks if the destination largefile already exists. It also keeps a
764 # checks if the destination largefile already exists. It also keeps a
765 # list of copied files so that the largefiles can be copied and the
765 # list of copied files so that the largefiles can be copied and the
766 # dirstate updated.
766 # dirstate updated.
@eh.wrapfunction(cmdutil, 'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Wrap ``cmdutil.copy`` so copy/rename also handles largefiles.

    Runs the wrapped copy twice: first with a matcher restricted to
    normal files, then with the patterns rewritten to target standins,
    while ``util.copyfile`` is intercepted to record which standins were
    copied.  Afterwards the corresponding largefiles are copied or
    renamed on disk and the largefiles dirstate is updated.  Raises
    :exc:`error.Abort` with 'no files to copy' only when neither pass
    matched anything.  Note: ``opts`` here is bytes-keyed (``b'force'``),
    unlike the native-keyed command overrides in this module.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        # Pass 1 matcher: exclude everything that is a largefile.
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, 'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            # 'no files to copy' just means this pass had nothing to do;
            # any other abort is a real error.
            if e.message != _(b'no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # Absolute filesystem path of the standin for a (cwd-relative) path.
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()

        def overridematch(
            orig,
            ctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Pass 2 matcher: match only standins of tracked largefiles.
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, b''))
                else:
                    newpats.append(pat)
            match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                return (
                    lfile is not None
                    and (f in manifest)
                    and origmatchfn(lfile)
                    or None
                )

            m.matchfn = matchfn
            return m

        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        copiedfiles = []

        def overridecopyfile(orig, src, dest, *args, **kwargs):
            # Record each standin copy so the matching largefile can be
            # copied afterwards; refuse to clobber an existing largefile
            # unless --force was given.
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                destlfile = dest.replace(lfutil.shortname, b'')
                if not opts[b'force'] and os.path.exists(destlfile):
                    raise IOError(
                        b'', _(b'destination largefile already exists')
                    )
            copiedfiles.append((src, dest))
            orig(src, dest, *args, **kwargs)

        with extensions.wrappedfunction(util, 'copyfile', overridecopyfile):
            with extensions.wrappedfunction(scmutil, 'match', overridematch):
                result += orig(ui, repo, listpats, opts, rename)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                # Map the copied standin paths back to largefile paths.
                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.set_untracked(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    lfdirstate.set_tracked(destlfile)
        lfdirstate.write(repo.currenttransaction())
    except error.Abort as e:
        if e.message != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        wlock.release()

    if nolfiles and nonormalfiles:
        # Neither pass found anything: surface the standard abort.
        raise error.Abort(_(b'no files to copy'))

    return result
928
928
929
929
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, 'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    """Wrap cmdutil.revert so that largefiles are reverted via their standins.

    Updates the standins to match the working largefiles, runs the original
    revert against a matcher rewritten to target standins, then updates the
    largefiles from whichever standins the revert changed.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock(), repo.dirstate.running_status(repo):
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write(repo.currenttransaction())
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Rewrite the matcher so that it hits standins instead of the
            # largefiles themselves.
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate.get_entry(f).removed:
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, 'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )
1015
1015
1016
1016
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrap the pull command to cache largefiles for --lfrev revisions.

    --all-largefiles (deprecated) is translated into the ``pulled()``
    revset, so every newly pulled changeset gets its largefiles cached.
    """
    revsprepull = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append(b'pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in logcmdutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # firstpulled is only meaningful while pulled() is being
            # evaluated; always remove it, even on error.
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result
1059
1059
1060
1060
@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        # Pass the resolved revisions through to the push operation via
        # opargs; exchangepushoperation() picks them up on the other side.
        opargs = kwargs.setdefault('opargs', {})
        opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)
1080
1080
1081
1081
@eh.wrapfunction(exchange, 'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Override pushoperation constructor and store lfrevs parameter"""
    # lfrevs is injected by overridepush() via opargs; remove it before
    # calling the original constructor, which doesn't know the keyword.
    lfrevs = kwargs.pop('lfrevs', None)
    pushop = orig(*args, **kwargs)
    pushop.lfrevs = lfrevs
    return pushop
1089
1089
1090
1090
@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull --lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull --lfrev "head(pulled()) and not closed()"

    """

    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        # repo.firstpulled only exists while overridepull() is processing
        # --lfrev, so this predicate is meaningless anywhere else.
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])
1116
1116
1117
1117
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    """Wrap the clone command to validate the --all-largefiles option.

    --all-largefiles requires a local destination: the largefiles must be
    downloaded into the new repository's store, which is impossible over
    a remote (e.g. ssh) destination.
    """
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % d
        )

    return orig(ui, source, dest, **opts)
1140
1140
1141
1141
@eh.wrapfunction(hg, 'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone to optionally download all largefiles after cloning.

    Returns the (sourcerepo, destrepo) pair from the original clone, or
    None if --all-largefiles was requested and some largefiles could not
    be downloaded.
    """
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get(b'all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo)

            if missing != 0:
                return None

    return result
1166
1166
1167
1167
@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    """Wrap the rebase command so largefile commits stay consistent.

    Installs an automated commit hook and a no-op status writer for the
    duration of the rebase, and forces in-memory rebase off (largefiles
    needs the working directory).
    """
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    # Silence largefiles status output while rebasing.
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1184
1184
1185
1185
@eh.extsetup
def overriderebase(ui):
    """At extension setup, force the rebase extension out of in-memory mode.

    Largefiles requires an on-disk working directory, so _dorebase is
    wrapped to always pass inmemory=False.
    """
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        # rebase extension not enabled; nothing to wrap
        pass
    else:

        def _dorebase(orig, *args, **kwargs):
            kwargs['inmemory'] = False
            return orig(*args, **kwargs)

        extensions.wrapfunction(rebase, '_dorebase', _dorebase)
1199
1199
1200
1200
@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Wrap the archive command to enable largefiles status tracking.

    Uses the unfiltered repo so standins for all revisions are visible.
    """
    with lfstatus(repo.unfiltered()):
        return orig(ui, repo.unfiltered(), dest, **opts)
1205
1205
1206
1206
@eh.wrapfunction(webcommands, 'archive')
def hgwebarchive(orig, web):
    """Wrap hgweb's archive handler to enable largefiles status tracking."""
    with lfstatus(web.repo):
        return orig(web)
1211
1211
1212
1212
@eh.wrapfunction(archival, 'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    """Wrap archival.archive() so archives contain real largefiles.

    When largefiles status is enabled, each standin in the archived
    revision is replaced by the largefile content found in the repo store
    or the system cache; otherwise defers entirely to the original.
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Add one file to the archive, honoring the matcher and decode flag.
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if node is not None:
                # Archive the largefile contents instead of the standin.
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive.  That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()
1308
1308
1309
1309
@eh.wrapfunction(subrepo.hgsubrepo, 'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Wrap hgsubrepo.archive() so subrepo archives contain real largefiles.

    Mirrors overridearchive(): each standin in the subrepo revision is
    replaced by the corresponding largefile content, then nested subrepos
    are archived recursively.
    """
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # NOTE: this previously tested the enclosing loop variable `f`
        # instead of the `name` parameter; the two are always the same at
        # the single call site below, but `name` is the intended binding.
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if ctx.node() is not None:
                # Archive the largefile contents instead of the standin.
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function.  That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)
1372
1372
1373
1373
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, 'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Abort if the repo has uncommitted changes, including largefiles."""
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        s = repo.status()
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_(b'uncommitted changes'))
1385
1385
1386
1386
@eh.wrapfunction(cmdutil, 'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    """Compute post-commit status with largefiles status tracking enabled."""
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)
1391
1391
1392
1392
@eh.wrapfunction(cmdutil, 'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Wrap cmdutil.forget() so largefiles are forgotten via their standins.

    Normal files are handled by the original implementation; matched
    largefiles are untracked in the lfdirstate and their standins are
    removed from the working copy and forgotten.

    Returns the combined (bad, forgot) lists from both passes.
    """
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # Only forget files that are actually tracked as largefiles.
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            lfdirstate.set_untracked(f)
        lfdirstate.write(repo.currenttransaction())
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1443
1443
1444
1444
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        # Record each (filename, hash) pair only once.
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        # One batched existence query against the remote store.
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]:  # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)
1469
1469
1470
1470
def outgoinghook(ui, repo, other, opts, missing):
    """Hook for the outgoing command: list largefiles that would be uploaded.

    Only active when the --large option (added by _outgoingcmd) is given.
    In debug mode the individual largefile hashes are shown per file.
    """
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}

            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []  # pytype: disable=unsupported-operands
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b'    %s\n' % lfhash)

        else:
            toupload = set()

            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)

            def showhashes(fn):
                pass

        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_(b'largefiles: no files to upload\n'))
        else:
            ui.status(
                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
            )
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + b'\n')
                showhashes(file)
            ui.status(b'\n')
1509
1509
1510
1510
@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    """Wrap 'hg outgoing' only to register the ``--large`` option."""
    # Nothing to do here other than add the extra help option- the hook above
    # processes it.
    return orig(*args, **kwargs)
1518
1518
1519
1519
def summaryremotehook(ui, repo, opts, changes):
    """Hook for 'hg summary --remote --large'.

    When ``changes`` is None this is the planning phase: return a
    (need-incoming-check, need-outgoing-check) pair. Otherwise report how
    many largefile entities/files would be uploaded to the remote.
    """
    largeopt = opts.get(b'large', False)
    if changes is None:
        if largeopt:
            return (False, True)  # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        # changes[1] carries the outgoing side of the summary computation
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(
                _(b'largefiles: %d entities for %d files to upload\n')
                % (len(lfhashes), len(toupload))
            )
1552
1552
1553
1553
@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run 'hg summary' with largefiles status reporting enabled."""
    # lfstatus makes repo.status() report largefiles instead of standins
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)
1560
1560
1561
1561
@eh.wrapfunction(scmutil, 'addremove')
def scmutiladdremove(
    orig,
    repo,
    matcher,
    prefix,
    uipathfn,
    opts=None,
    open_tr=None,
):
    """Wrap scmutil.addremove to handle largefiles before the normal code.

    Missing largefiles are removed and new largefile candidates added via
    the largefiles code paths; the original addremove then runs with a
    matcher that excludes all largefiles.
    """
    if opts is None:
        opts = {}
    # repos without largefiles take the normal path untouched
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)

    # open the transaction and changing_files context
    if open_tr is not None:
        open_tr()

    # Get the list of missing largefiles so we can remove them
    with repo.dirstate.running_status(repo):
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s, mtime_boundary = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=False,
            unknown=False,
        )

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)

    return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
1626
1626
1627
1627
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Run 'hg purge' with a status method that hides tracked largefiles.

    Temporarily replaces repo.status so purge does not see largefiles (which
    appear as unknown/ignored to the plain dirstate) as deletable files.
    """
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        # run the real status, then drop largefiles tracked by the
        # largefiles dirstate from the unknown/ignored lists
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [
            f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
        ]
        ignored = [
            f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
        ]
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    # restore the original status method afterwards
    repo.status = oldstatus
1670
1670
1671
1671
@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    """Run 'hg rollback', then restore standin files to match the new parent.

    Standins present before the rollback but absent afterwards ("orphans")
    are deleted from the working directory.
    """
    with repo.wlock():
        before = repo.dirstate.parents()
        # standins tracked (not removed) before the rollback
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate.get_entry(f).removed:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore the standin content from the new parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

    return result
1703
1703
1704
1704
@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run 'hg transplant' with automated largefiles commit handling.

    Pushes an automated commit hook (and a no-op status writer to silence
    largefiles messages) for the duration of the command, popping both
    afterwards even on error.
    """
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        # always unwind the hooks we pushed above
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result
1716
1716
1717
1717
@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """Re-implement 'hg cat' so largefiles are printed by their visible name.

    The matcher is patched to also accept a largefile name when only its
    standin exists in the revision; the largefile content is then fetched
    from the store/user cache instead of printing the standin hash.

    NOTE: ``opts`` now uses native str keys (migrated from byteskwargs);
    byte keys are only produced where scmutil.match still requires them.
    """
    ctx = logcmdutil.revsingle(repo, opts.get('rev'))
    err = 1
    # largefile names we vouched for; suppress "no such file" for them
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, pycompat.byteskwargs(opts))
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        # accept a name if the original matcher does, or if it is the
        # largefile counterpart of a standin tracked in this revision
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        if not f in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        # always descend into the standin directory
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get('output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get('decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                # f is a standin matched via lfmatchfn: emit the largefile
                # content, downloading it into the user cache if needed
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1788
1787
1789
1788
@eh.wrapfunction(merge, '_update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    """Wrap merge._update to keep standins and largefiles in sync.

    Before the update: refresh standins from modified/unsure largefiles and
    conservatively mark clean largefiles dirty (in case the update is
    interrupted). After the update: re-mark unchanged largefiles clean and
    fetch/update the largefiles whose standins changed.
    """
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock(), repo.dirstate.changing_parents(repo):
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")
        with repo.dirstate.running_status(repo):
            lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
            unsure, s, mtime_boundary = lfdirstate.status(
                matchmod.always(),
                subrepos=[],
                ignored=False,
                clean=True,
                unknown=False,
            )
            oldclean = set(s.clean)
            pctx = repo[b'.']
            dctx = repo[node]
            for lfile in unsure + s.modified:
                lfileabs = repo.wvfs.join(lfile)
                if not repo.wvfs.exists(lfileabs):
                    continue
                lfhash = lfutil.hashfile(lfileabs)
                standin = lfutil.standin(lfile)
                lfutil.writestandin(
                    repo, standin, lfhash, lfutil.getexecutable(lfileabs)
                )
                if standin in pctx and lfhash == lfutil.readasstandin(
                    pctx[standin]
                ):
                    # hash matches the parent: the file is actually clean
                    oldclean.add(lfile)
            for lfile in s.added:
                fstandin = lfutil.standin(lfile)
                if fstandin not in dctx:
                    # in this case, content of standin file is meaningless
                    # (in dctx, lfile is unknown, or normal file)
                    continue
                lfutil.updatestandin(repo, lfile, fstandin)
            # mark all clean largefiles as dirty, just in case the update gets
            # interrupted before largefiles and lfdirstate are synchronized
            for lfile in oldclean:
                entry = lfdirstate.get_entry(lfile)
                lfdirstate.hacky_extension_update_file(
                    lfile,
                    wc_tracked=entry.tracked,
                    p1_tracked=entry.p1_tracked,
                    p2_info=entry.p2_info,
                    possibly_dirty=True,
                )
            lfdirstate.write(repo.currenttransaction())

        oldstandins = lfutil.getstandinsstate(repo)
        wc = kwargs.get('wc')
        if wc and wc.isinmemory():
            # largefiles is not a good candidate for in-memory merge (large
            # files, custom dirstate, matcher usage).
            raise error.ProgrammingError(
                b'largefiles is not compatible with in-memory merge'
            )
        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

        return result
1881
1880
1882
1881
@eh.wrapfunction(scmutil, 'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """Wrap scmutil.marktouched to refresh largefiles whose standins were
    among the touched files.

    After the original call, any touched standin is mapped back to its
    largefile name and those largefiles are updated quietly.
    """
    result = orig(repo, files, *args, **kwargs)

    # map touched standins back to largefile names; non-standins yield None
    candidates = (lfutil.splitstandin(f) for f in files)
    filelist = [lf for lf in candidates if lf is not None]
    if filelist:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=filelist,
            printmessage=False,
            normallookup=True,
        )

    return result
1902
1901
1903
1902
@eh.wrapfunction(upgrade_actions, 'preservedrequirements')
@eh.wrapfunction(upgrade_actions, 'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Ensure the b'largefiles' requirement is carried through 'hg upgrade'."""
    requirements = orig(repo)
    if b'largefiles' in repo.requirements:
        requirements.add(b'largefiles')
    return requirements
1911
1910
1912
1911
# URL scheme recognized by openlargefile() below for fetching by largefile id
_lfscheme = b'largefile://'
1914
1913
1915
1914
@eh.wrapfunction(urlmod, 'open')
def openlargefile(orig, ui, url_, data=None, **kwargs):
    """Wrap url.open to serve 'largefile://<id>' URLs from the store.

    Other URLs fall through to the original opener unchanged. Passing
    ``data`` with a largefile URL is a programming error.
    """
    if url_.startswith(_lfscheme):
        if data:
            msg = b"cannot use data on a 'largefile://' url"
            raise error.ProgrammingError(msg)
        # everything after the scheme prefix is the largefile id
        lfid = url_[len(_lfscheme) :]
        return storefactory.getlfile(ui, lfid)
    else:
        return orig(ui, url_, data=data, **kwargs)
General Comments 0
You need to be logged in to leave comments. Login now