largefiles: remove unused 'rev' parameter from downloadlfiles()...
Yuya Nishihara
r46025:39ddb112 default
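Summary of the change: downloadlfiles() in hgext/largefiles/lfcommands.py loses its unused rev parameter. The only caller visible in this file, the lfconvert --to-normal path, already calls downloadlfiles(ui, rsrc) with no revision list, so the `if rev != []` guard and the {b'rev': rev} argument to cmdutil.walkchangerevs() never did anything useful. A trimmed sketch of the function after this change (assumption: the real body also prints the cached/missing totals with ui.status() before returning, as shown in the hunk below):

    def downloadlfiles(ui, repo):
        # match every largefile standin under .hglf/ in the working copy
        match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})

        def prepare(ctx, fns):
            pass

        totalsuccess = 0
        totalmissing = 0
        # b'rev': None now always walks all revisions; the removed guard only
        # existed because walkchangerevs on an empty rev list returns all revs
        for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': None}, prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(success)
            totalmissing += len(missing)
        return totalsuccess, totalmissing

The overrides.py hunk that follows is unchanged context accompanying the same changeset.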
@@ -1,669 +1,668 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 from mercurial import (
18 from mercurial import (
19 cmdutil,
19 cmdutil,
20 context,
20 context,
21 error,
21 error,
22 exthelper,
22 exthelper,
23 hg,
23 hg,
24 lock,
24 lock,
25 match as matchmod,
25 match as matchmod,
26 node,
26 node,
27 pycompat,
27 pycompat,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 )
30 )
31 from mercurial.utils import hashutil
31 from mercurial.utils import hashutil
32
32
33 from ..convert import (
33 from ..convert import (
34 convcmd,
34 convcmd,
35 filemap,
35 filemap,
36 )
36 )
37
37
38 from . import lfutil, storefactory
38 from . import lfutil, storefactory
39
39
40 release = lock.release
40 release = lock.release
41
41
42 # -- Commands ----------------------------------------------------------
42 # -- Commands ----------------------------------------------------------
43
43
44 eh = exthelper.exthelper()
44 eh = exthelper.exthelper()
45
45
46
46
47 @eh.command(
47 @eh.command(
48 b'lfconvert',
48 b'lfconvert',
49 [
49 [
50 (
50 (
51 b's',
51 b's',
52 b'size',
52 b'size',
53 b'',
53 b'',
54 _(b'minimum size (MB) for files to be converted as largefiles'),
54 _(b'minimum size (MB) for files to be converted as largefiles'),
55 b'SIZE',
55 b'SIZE',
56 ),
56 ),
57 (
57 (
58 b'',
58 b'',
59 b'to-normal',
59 b'to-normal',
60 False,
60 False,
61 _(b'convert from a largefiles repo to a normal repo'),
61 _(b'convert from a largefiles repo to a normal repo'),
62 ),
62 ),
63 ],
63 ],
64 _(b'hg lfconvert SOURCE DEST [FILE ...]'),
64 _(b'hg lfconvert SOURCE DEST [FILE ...]'),
65 norepo=True,
65 norepo=True,
66 inferrepo=True,
66 inferrepo=True,
67 )
67 )
68 def lfconvert(ui, src, dest, *pats, **opts):
68 def lfconvert(ui, src, dest, *pats, **opts):
69 '''convert a normal repository to a largefiles repository
69 '''convert a normal repository to a largefiles repository
70
70
71 Convert repository SOURCE to a new repository DEST, identical to
71 Convert repository SOURCE to a new repository DEST, identical to
72 SOURCE except that certain files will be converted as largefiles:
72 SOURCE except that certain files will be converted as largefiles:
73 specifically, any file that matches any PATTERN *or* whose size is
73 specifically, any file that matches any PATTERN *or* whose size is
74 above the minimum size threshold is converted as a largefile. The
74 above the minimum size threshold is converted as a largefile. The
75 size used to determine whether or not to track a file as a
75 size used to determine whether or not to track a file as a
76 largefile is the size of the first version of the file. The
76 largefile is the size of the first version of the file. The
77 minimum size can be specified either with --size or in
77 minimum size can be specified either with --size or in
78 configuration as ``largefiles.size``.
78 configuration as ``largefiles.size``.
79
79
80 After running this command you will need to make sure that
80 After running this command you will need to make sure that
81 largefiles is enabled anywhere you intend to push the new
81 largefiles is enabled anywhere you intend to push the new
82 repository.
82 repository.
83
83
84 Use --to-normal to convert largefiles back to normal files; after
84 Use --to-normal to convert largefiles back to normal files; after
85 this, the DEST repository can be used without largefiles at all.'''
85 this, the DEST repository can be used without largefiles at all.'''
86
86
87 opts = pycompat.byteskwargs(opts)
87 opts = pycompat.byteskwargs(opts)
88 if opts[b'to_normal']:
88 if opts[b'to_normal']:
89 tolfile = False
89 tolfile = False
90 else:
90 else:
91 tolfile = True
91 tolfile = True
92 size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)
92 size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)
93
93
94 if not hg.islocal(src):
94 if not hg.islocal(src):
95 raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
95 raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
96 if not hg.islocal(dest):
96 if not hg.islocal(dest):
97 raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)
97 raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)
98
98
99 rsrc = hg.repository(ui, src)
99 rsrc = hg.repository(ui, src)
100 ui.status(_(b'initializing destination %s\n') % dest)
100 ui.status(_(b'initializing destination %s\n') % dest)
101 rdst = hg.repository(ui, dest, create=True)
101 rdst = hg.repository(ui, dest, create=True)
102
102
103 success = False
103 success = False
104 dstwlock = dstlock = None
104 dstwlock = dstlock = None
105 try:
105 try:
106 # Get a list of all changesets in the source. The easy way to do this
106 # Get a list of all changesets in the source. The easy way to do this
107 # is to simply walk the changelog, using changelog.nodesbetween().
107 # is to simply walk the changelog, using changelog.nodesbetween().
108 # Take a look at mercurial/revlog.py:639 for more details.
108 # Take a look at mercurial/revlog.py:639 for more details.
109 # Use a generator instead of a list to decrease memory usage
109 # Use a generator instead of a list to decrease memory usage
110 ctxs = (
110 ctxs = (
111 rsrc[ctx]
111 rsrc[ctx]
112 for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
112 for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
113 )
113 )
114 revmap = {node.nullid: node.nullid}
114 revmap = {node.nullid: node.nullid}
115 if tolfile:
115 if tolfile:
116 # Lock destination to prevent modification while it is converted to.
116 # Lock destination to prevent modification while it is converted to.
117 # Don't need to lock src because we are just reading from its
117 # Don't need to lock src because we are just reading from its
118 # history which can't change.
118 # history which can't change.
119 dstwlock = rdst.wlock()
119 dstwlock = rdst.wlock()
120 dstlock = rdst.lock()
120 dstlock = rdst.lock()
121
121
122 lfiles = set()
122 lfiles = set()
123 normalfiles = set()
123 normalfiles = set()
124 if not pats:
124 if not pats:
125 pats = ui.configlist(lfutil.longname, b'patterns')
125 pats = ui.configlist(lfutil.longname, b'patterns')
126 if pats:
126 if pats:
127 matcher = matchmod.match(rsrc.root, b'', list(pats))
127 matcher = matchmod.match(rsrc.root, b'', list(pats))
128 else:
128 else:
129 matcher = None
129 matcher = None
130
130
131 lfiletohash = {}
131 lfiletohash = {}
132 with ui.makeprogress(
132 with ui.makeprogress(
133 _(b'converting revisions'),
133 _(b'converting revisions'),
134 unit=_(b'revisions'),
134 unit=_(b'revisions'),
135 total=rsrc[b'tip'].rev(),
135 total=rsrc[b'tip'].rev(),
136 ) as progress:
136 ) as progress:
137 for ctx in ctxs:
137 for ctx in ctxs:
138 progress.update(ctx.rev())
138 progress.update(ctx.rev())
139 _lfconvert_addchangeset(
139 _lfconvert_addchangeset(
140 rsrc,
140 rsrc,
141 rdst,
141 rdst,
142 ctx,
142 ctx,
143 revmap,
143 revmap,
144 lfiles,
144 lfiles,
145 normalfiles,
145 normalfiles,
146 matcher,
146 matcher,
147 size,
147 size,
148 lfiletohash,
148 lfiletohash,
149 )
149 )
150
150
151 if rdst.wvfs.exists(lfutil.shortname):
151 if rdst.wvfs.exists(lfutil.shortname):
152 rdst.wvfs.rmtree(lfutil.shortname)
152 rdst.wvfs.rmtree(lfutil.shortname)
153
153
154 for f in lfiletohash.keys():
154 for f in lfiletohash.keys():
155 if rdst.wvfs.isfile(f):
155 if rdst.wvfs.isfile(f):
156 rdst.wvfs.unlink(f)
156 rdst.wvfs.unlink(f)
157 try:
157 try:
158 rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
158 rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
159 except OSError:
159 except OSError:
160 pass
160 pass
161
161
162 # If there were any files converted to largefiles, add largefiles
162 # If there were any files converted to largefiles, add largefiles
163 # to the destination repository's requirements.
163 # to the destination repository's requirements.
164 if lfiles:
164 if lfiles:
165 rdst.requirements.add(b'largefiles')
165 rdst.requirements.add(b'largefiles')
166 scmutil.writereporequirements(rdst)
166 scmutil.writereporequirements(rdst)
167 else:
167 else:
168
168
169 class lfsource(filemap.filemap_source):
169 class lfsource(filemap.filemap_source):
170 def __init__(self, ui, source):
170 def __init__(self, ui, source):
171 super(lfsource, self).__init__(ui, source, None)
171 super(lfsource, self).__init__(ui, source, None)
172 self.filemapper.rename[lfutil.shortname] = b'.'
172 self.filemapper.rename[lfutil.shortname] = b'.'
173
173
174 def getfile(self, name, rev):
174 def getfile(self, name, rev):
175 realname, realrev = rev
175 realname, realrev = rev
176 f = super(lfsource, self).getfile(name, rev)
176 f = super(lfsource, self).getfile(name, rev)
177
177
178 if (
178 if (
179 not realname.startswith(lfutil.shortnameslash)
179 not realname.startswith(lfutil.shortnameslash)
180 or f[0] is None
180 or f[0] is None
181 ):
181 ):
182 return f
182 return f
183
183
184 # Substitute in the largefile data for the hash
184 # Substitute in the largefile data for the hash
185 hash = f[0].strip()
185 hash = f[0].strip()
186 path = lfutil.findfile(rsrc, hash)
186 path = lfutil.findfile(rsrc, hash)
187
187
188 if path is None:
188 if path is None:
189 raise error.Abort(
189 raise error.Abort(
190 _(b"missing largefile for '%s' in %s")
190 _(b"missing largefile for '%s' in %s")
191 % (realname, realrev)
191 % (realname, realrev)
192 )
192 )
193 return util.readfile(path), f[1]
193 return util.readfile(path), f[1]
194
194
195 class converter(convcmd.converter):
195 class converter(convcmd.converter):
196 def __init__(self, ui, source, dest, revmapfile, opts):
196 def __init__(self, ui, source, dest, revmapfile, opts):
197 src = lfsource(ui, source)
197 src = lfsource(ui, source)
198
198
199 super(converter, self).__init__(
199 super(converter, self).__init__(
200 ui, src, dest, revmapfile, opts
200 ui, src, dest, revmapfile, opts
201 )
201 )
202
202
203 found, missing = downloadlfiles(ui, rsrc)
203 found, missing = downloadlfiles(ui, rsrc)
204 if missing != 0:
204 if missing != 0:
205 raise error.Abort(_(b"all largefiles must be present locally"))
205 raise error.Abort(_(b"all largefiles must be present locally"))
206
206
207 orig = convcmd.converter
207 orig = convcmd.converter
208 convcmd.converter = converter
208 convcmd.converter = converter
209
209
210 try:
210 try:
211 convcmd.convert(
211 convcmd.convert(
212 ui, src, dest, source_type=b'hg', dest_type=b'hg'
212 ui, src, dest, source_type=b'hg', dest_type=b'hg'
213 )
213 )
214 finally:
214 finally:
215 convcmd.converter = orig
215 convcmd.converter = orig
216 success = True
216 success = True
217 finally:
217 finally:
218 if tolfile:
218 if tolfile:
219 rdst.dirstate.clear()
219 rdst.dirstate.clear()
220 release(dstlock, dstwlock)
220 release(dstlock, dstwlock)
221 if not success:
221 if not success:
222 # we failed, remove the new directory
222 # we failed, remove the new directory
223 shutil.rmtree(rdst.root)
223 shutil.rmtree(rdst.root)
224
224
225
225
226 def _lfconvert_addchangeset(
226 def _lfconvert_addchangeset(
227 rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
227 rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
228 ):
228 ):
229 # Convert src parents to dst parents
229 # Convert src parents to dst parents
230 parents = _convertparents(ctx, revmap)
230 parents = _convertparents(ctx, revmap)
231
231
232 # Generate list of changed files
232 # Generate list of changed files
233 files = _getchangedfiles(ctx, parents)
233 files = _getchangedfiles(ctx, parents)
234
234
235 dstfiles = []
235 dstfiles = []
236 for f in files:
236 for f in files:
237 if f not in lfiles and f not in normalfiles:
237 if f not in lfiles and f not in normalfiles:
238 islfile = _islfile(f, ctx, matcher, size)
238 islfile = _islfile(f, ctx, matcher, size)
239 # If this file was renamed or copied then copy
239 # If this file was renamed or copied then copy
240 # the largefile-ness of its predecessor
240 # the largefile-ness of its predecessor
241 if f in ctx.manifest():
241 if f in ctx.manifest():
242 fctx = ctx.filectx(f)
242 fctx = ctx.filectx(f)
243 renamed = fctx.copysource()
243 renamed = fctx.copysource()
244 if renamed is None:
244 if renamed is None:
245 # the code below assumes renamed to be a boolean or a list
245 # the code below assumes renamed to be a boolean or a list
246 # and won't quite work with the value None
246 # and won't quite work with the value None
247 renamed = False
247 renamed = False
248 renamedlfile = renamed and renamed in lfiles
248 renamedlfile = renamed and renamed in lfiles
249 islfile |= renamedlfile
249 islfile |= renamedlfile
250 if b'l' in fctx.flags():
250 if b'l' in fctx.flags():
251 if renamedlfile:
251 if renamedlfile:
252 raise error.Abort(
252 raise error.Abort(
253 _(b'renamed/copied largefile %s becomes symlink')
253 _(b'renamed/copied largefile %s becomes symlink')
254 % f
254 % f
255 )
255 )
256 islfile = False
256 islfile = False
257 if islfile:
257 if islfile:
258 lfiles.add(f)
258 lfiles.add(f)
259 else:
259 else:
260 normalfiles.add(f)
260 normalfiles.add(f)
261
261
262 if f in lfiles:
262 if f in lfiles:
263 fstandin = lfutil.standin(f)
263 fstandin = lfutil.standin(f)
264 dstfiles.append(fstandin)
264 dstfiles.append(fstandin)
265 # largefile in manifest if it has not been removed/renamed
265 # largefile in manifest if it has not been removed/renamed
266 if f in ctx.manifest():
266 if f in ctx.manifest():
267 fctx = ctx.filectx(f)
267 fctx = ctx.filectx(f)
268 if b'l' in fctx.flags():
268 if b'l' in fctx.flags():
269 renamed = fctx.copysource()
269 renamed = fctx.copysource()
270 if renamed and renamed in lfiles:
270 if renamed and renamed in lfiles:
271 raise error.Abort(
271 raise error.Abort(
272 _(b'largefile %s becomes symlink') % f
272 _(b'largefile %s becomes symlink') % f
273 )
273 )
274
274
275 # largefile was modified, update standins
275 # largefile was modified, update standins
276 m = hashutil.sha1(b'')
276 m = hashutil.sha1(b'')
277 m.update(ctx[f].data())
277 m.update(ctx[f].data())
278 hash = node.hex(m.digest())
278 hash = node.hex(m.digest())
279 if f not in lfiletohash or lfiletohash[f] != hash:
279 if f not in lfiletohash or lfiletohash[f] != hash:
280 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
280 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
281 executable = b'x' in ctx[f].flags()
281 executable = b'x' in ctx[f].flags()
282 lfutil.writestandin(rdst, fstandin, hash, executable)
282 lfutil.writestandin(rdst, fstandin, hash, executable)
283 lfiletohash[f] = hash
283 lfiletohash[f] = hash
284 else:
284 else:
285 # normal file
285 # normal file
286 dstfiles.append(f)
286 dstfiles.append(f)
287
287
288 def getfilectx(repo, memctx, f):
288 def getfilectx(repo, memctx, f):
289 srcfname = lfutil.splitstandin(f)
289 srcfname = lfutil.splitstandin(f)
290 if srcfname is not None:
290 if srcfname is not None:
291 # if the file isn't in the manifest then it was removed
291 # if the file isn't in the manifest then it was removed
292 # or renamed, return None to indicate this
292 # or renamed, return None to indicate this
293 try:
293 try:
294 fctx = ctx.filectx(srcfname)
294 fctx = ctx.filectx(srcfname)
295 except error.LookupError:
295 except error.LookupError:
296 return None
296 return None
297 renamed = fctx.copysource()
297 renamed = fctx.copysource()
298 if renamed:
298 if renamed:
299 # standin is always a largefile because largefile-ness
299 # standin is always a largefile because largefile-ness
300 # doesn't change after rename or copy
300 # doesn't change after rename or copy
301 renamed = lfutil.standin(renamed)
301 renamed = lfutil.standin(renamed)
302
302
303 return context.memfilectx(
303 return context.memfilectx(
304 repo,
304 repo,
305 memctx,
305 memctx,
306 f,
306 f,
307 lfiletohash[srcfname] + b'\n',
307 lfiletohash[srcfname] + b'\n',
308 b'l' in fctx.flags(),
308 b'l' in fctx.flags(),
309 b'x' in fctx.flags(),
309 b'x' in fctx.flags(),
310 renamed,
310 renamed,
311 )
311 )
312 else:
312 else:
313 return _getnormalcontext(repo, ctx, f, revmap)
313 return _getnormalcontext(repo, ctx, f, revmap)
314
314
315 # Commit
315 # Commit
316 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
316 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
317
317
318
318
319 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
319 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
320 mctx = context.memctx(
320 mctx = context.memctx(
321 rdst,
321 rdst,
322 parents,
322 parents,
323 ctx.description(),
323 ctx.description(),
324 dstfiles,
324 dstfiles,
325 getfilectx,
325 getfilectx,
326 ctx.user(),
326 ctx.user(),
327 ctx.date(),
327 ctx.date(),
328 ctx.extra(),
328 ctx.extra(),
329 )
329 )
330 ret = rdst.commitctx(mctx)
330 ret = rdst.commitctx(mctx)
331 lfutil.copyalltostore(rdst, ret)
331 lfutil.copyalltostore(rdst, ret)
332 rdst.setparents(ret)
332 rdst.setparents(ret)
333 revmap[ctx.node()] = rdst.changelog.tip()
333 revmap[ctx.node()] = rdst.changelog.tip()
334
334
335
335
336 # Generate list of changed files
336 # Generate list of changed files
337 def _getchangedfiles(ctx, parents):
337 def _getchangedfiles(ctx, parents):
338 files = set(ctx.files())
338 files = set(ctx.files())
339 if node.nullid not in parents:
339 if node.nullid not in parents:
340 mc = ctx.manifest()
340 mc = ctx.manifest()
341 for pctx in ctx.parents():
341 for pctx in ctx.parents():
342 for fn in pctx.manifest().diff(mc):
342 for fn in pctx.manifest().diff(mc):
343 files.add(fn)
343 files.add(fn)
344 return files
344 return files
345
345
346
346
347 # Convert src parents to dst parents
347 # Convert src parents to dst parents
348 def _convertparents(ctx, revmap):
348 def _convertparents(ctx, revmap):
349 parents = []
349 parents = []
350 for p in ctx.parents():
350 for p in ctx.parents():
351 parents.append(revmap[p.node()])
351 parents.append(revmap[p.node()])
352 while len(parents) < 2:
352 while len(parents) < 2:
353 parents.append(node.nullid)
353 parents.append(node.nullid)
354 return parents
354 return parents
355
355
356
356
357 # Get memfilectx for a normal file
357 # Get memfilectx for a normal file
358 def _getnormalcontext(repo, ctx, f, revmap):
358 def _getnormalcontext(repo, ctx, f, revmap):
359 try:
359 try:
360 fctx = ctx.filectx(f)
360 fctx = ctx.filectx(f)
361 except error.LookupError:
361 except error.LookupError:
362 return None
362 return None
363 renamed = fctx.copysource()
363 renamed = fctx.copysource()
364
364
365 data = fctx.data()
365 data = fctx.data()
366 if f == b'.hgtags':
366 if f == b'.hgtags':
367 data = _converttags(repo.ui, revmap, data)
367 data = _converttags(repo.ui, revmap, data)
368 return context.memfilectx(
368 return context.memfilectx(
369 repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
369 repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
370 )
370 )
371
371
372
372
373 # Remap tag data using a revision map
373 # Remap tag data using a revision map
374 def _converttags(ui, revmap, data):
374 def _converttags(ui, revmap, data):
375 newdata = []
375 newdata = []
376 for line in data.splitlines():
376 for line in data.splitlines():
377 try:
377 try:
378 id, name = line.split(b' ', 1)
378 id, name = line.split(b' ', 1)
379 except ValueError:
379 except ValueError:
380 ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
380 ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
381 continue
381 continue
382 try:
382 try:
383 newid = node.bin(id)
383 newid = node.bin(id)
384 except TypeError:
384 except TypeError:
385 ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
385 ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
386 continue
386 continue
387 try:
387 try:
388 newdata.append(b'%s %s\n' % (node.hex(revmap[newid]), name))
388 newdata.append(b'%s %s\n' % (node.hex(revmap[newid]), name))
389 except KeyError:
389 except KeyError:
390 ui.warn(_(b'no mapping for id %s\n') % id)
390 ui.warn(_(b'no mapping for id %s\n') % id)
391 continue
391 continue
392 return b''.join(newdata)
392 return b''.join(newdata)
393
393
394
394
395 def _islfile(file, ctx, matcher, size):
395 def _islfile(file, ctx, matcher, size):
396 '''Return true if file should be considered a largefile, i.e.
396 '''Return true if file should be considered a largefile, i.e.
397 matcher matches it or it is larger than size.'''
397 matcher matches it or it is larger than size.'''
398 # never store special .hg* files as largefiles
398 # never store special .hg* files as largefiles
399 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
399 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
400 return False
400 return False
401 if matcher and matcher(file):
401 if matcher and matcher(file):
402 return True
402 return True
403 try:
403 try:
404 return ctx.filectx(file).size() >= size * 1024 * 1024
404 return ctx.filectx(file).size() >= size * 1024 * 1024
405 except error.LookupError:
405 except error.LookupError:
406 return False
406 return False
407
407
408
408
409 def uploadlfiles(ui, rsrc, rdst, files):
409 def uploadlfiles(ui, rsrc, rdst, files):
410 '''upload largefiles to the central store'''
410 '''upload largefiles to the central store'''
411
411
412 if not files:
412 if not files:
413 return
413 return
414
414
415 store = storefactory.openstore(rsrc, rdst, put=True)
415 store = storefactory.openstore(rsrc, rdst, put=True)
416
416
417 at = 0
417 at = 0
418 ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
418 ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
419 retval = store.exists(files)
419 retval = store.exists(files)
420 files = [h for h in files if not retval[h]]
420 files = [h for h in files if not retval[h]]
421 ui.debug(b"%d largefiles need to be uploaded\n" % len(files))
421 ui.debug(b"%d largefiles need to be uploaded\n" % len(files))
422
422
423 with ui.makeprogress(
423 with ui.makeprogress(
424 _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
424 _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
425 ) as progress:
425 ) as progress:
426 for hash in files:
426 for hash in files:
427 progress.update(at)
427 progress.update(at)
428 source = lfutil.findfile(rsrc, hash)
428 source = lfutil.findfile(rsrc, hash)
429 if not source:
429 if not source:
430 raise error.Abort(
430 raise error.Abort(
431 _(
431 _(
432 b'largefile %s missing from store'
432 b'largefile %s missing from store'
433 b' (needs to be uploaded)'
433 b' (needs to be uploaded)'
434 )
434 )
435 % hash
435 % hash
436 )
436 )
437 # XXX check for errors here
437 # XXX check for errors here
438 store.put(source, hash)
438 store.put(source, hash)
439 at += 1
439 at += 1
440
440
441
441
442 def verifylfiles(ui, repo, all=False, contents=False):
442 def verifylfiles(ui, repo, all=False, contents=False):
443 '''Verify that every largefile revision in the current changeset
443 '''Verify that every largefile revision in the current changeset
444 exists in the central store. With --contents, also verify that
444 exists in the central store. With --contents, also verify that
445 the contents of each local largefile file revision are correct (SHA-1 hash
445 the contents of each local largefile file revision are correct (SHA-1 hash
446 matches the revision ID). With --all, check every changeset in
446 matches the revision ID). With --all, check every changeset in
447 this repository.'''
447 this repository.'''
448 if all:
448 if all:
449 revs = repo.revs(b'all()')
449 revs = repo.revs(b'all()')
450 else:
450 else:
451 revs = [b'.']
451 revs = [b'.']
452
452
453 store = storefactory.openstore(repo)
453 store = storefactory.openstore(repo)
454 return store.verify(revs, contents=contents)
454 return store.verify(revs, contents=contents)
455
455
456
456
457 def cachelfiles(ui, repo, node, filelist=None):
457 def cachelfiles(ui, repo, node, filelist=None):
458 '''cachelfiles ensures that all largefiles needed by the specified revision
458 '''cachelfiles ensures that all largefiles needed by the specified revision
459 are present in the repository's largefile cache.
459 are present in the repository's largefile cache.
460
460
461 returns a tuple (cached, missing). cached is the list of files downloaded
461 returns a tuple (cached, missing). cached is the list of files downloaded
462 by this operation; missing is the list of files that were needed but could
462 by this operation; missing is the list of files that were needed but could
463 not be found.'''
463 not be found.'''
464 lfiles = lfutil.listlfiles(repo, node)
464 lfiles = lfutil.listlfiles(repo, node)
465 if filelist:
465 if filelist:
466 lfiles = set(lfiles) & set(filelist)
466 lfiles = set(lfiles) & set(filelist)
467 toget = []
467 toget = []
468
468
469 ctx = repo[node]
469 ctx = repo[node]
470 for lfile in lfiles:
470 for lfile in lfiles:
471 try:
471 try:
472 expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
472 expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
473 except IOError as err:
473 except IOError as err:
474 if err.errno == errno.ENOENT:
474 if err.errno == errno.ENOENT:
475 continue # node must be None and standin wasn't found in wctx
475 continue # node must be None and standin wasn't found in wctx
476 raise
476 raise
477 if not lfutil.findfile(repo, expectedhash):
477 if not lfutil.findfile(repo, expectedhash):
478 toget.append((lfile, expectedhash))
478 toget.append((lfile, expectedhash))
479
479
480 if toget:
480 if toget:
481 store = storefactory.openstore(repo)
481 store = storefactory.openstore(repo)
482 ret = store.get(toget)
482 ret = store.get(toget)
483 return ret
483 return ret
484
484
485 return ([], [])
485 return ([], [])
486
486
487
487
488 def downloadlfiles(ui, repo, rev=None):
488 def downloadlfiles(ui, repo):
489 match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})
489 match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})
490
490
491 def prepare(ctx, fns):
491 def prepare(ctx, fns):
492 pass
492 pass
493
493
494 totalsuccess = 0
494 totalsuccess = 0
495 totalmissing = 0
495 totalmissing = 0
496 if rev != []: # walkchangerevs on empty list would return all revs
496 for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': None}, prepare):
497 for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': rev}, prepare):
498 success, missing = cachelfiles(ui, repo, ctx.node())
497 success, missing = cachelfiles(ui, repo, ctx.node())
499 totalsuccess += len(success)
498 totalsuccess += len(success)
500 totalmissing += len(missing)
499 totalmissing += len(missing)
501 ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
500 ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
502 if totalmissing > 0:
501 if totalmissing > 0:
503 ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
502 ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
504 return totalsuccess, totalmissing
503 return totalsuccess, totalmissing
505
504
506
505
507 def updatelfiles(
506 def updatelfiles(
508 ui, repo, filelist=None, printmessage=None, normallookup=False
507 ui, repo, filelist=None, printmessage=None, normallookup=False
509 ):
508 ):
510 '''Update largefiles according to standins in the working directory
509 '''Update largefiles according to standins in the working directory
511
510
512 If ``printmessage`` is other than ``None``, it means "print (or
511 If ``printmessage`` is other than ``None``, it means "print (or
513 ignore, for false) message forcibly".
512 ignore, for false) message forcibly".
514 '''
513 '''
515 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
514 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
516 with repo.wlock():
515 with repo.wlock():
517 lfdirstate = lfutil.openlfdirstate(ui, repo)
516 lfdirstate = lfutil.openlfdirstate(ui, repo)
518 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
517 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
519
518
520 if filelist is not None:
519 if filelist is not None:
521 filelist = set(filelist)
520 filelist = set(filelist)
522 lfiles = [f for f in lfiles if f in filelist]
521 lfiles = [f for f in lfiles if f in filelist]
523
522
524 update = {}
523 update = {}
525 dropped = set()
524 dropped = set()
526 updated, removed = 0, 0
525 updated, removed = 0, 0
527 wvfs = repo.wvfs
526 wvfs = repo.wvfs
528 wctx = repo[None]
527 wctx = repo[None]
529 for lfile in lfiles:
528 for lfile in lfiles:
530 lfileorig = os.path.relpath(
529 lfileorig = os.path.relpath(
531 scmutil.backuppath(ui, repo, lfile), start=repo.root
530 scmutil.backuppath(ui, repo, lfile), start=repo.root
532 )
531 )
533 standin = lfutil.standin(lfile)
532 standin = lfutil.standin(lfile)
534 standinorig = os.path.relpath(
533 standinorig = os.path.relpath(
535 scmutil.backuppath(ui, repo, standin), start=repo.root
534 scmutil.backuppath(ui, repo, standin), start=repo.root
536 )
535 )
537 if wvfs.exists(standin):
536 if wvfs.exists(standin):
538 if wvfs.exists(standinorig) and wvfs.exists(lfile):
537 if wvfs.exists(standinorig) and wvfs.exists(lfile):
539 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
538 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
540 wvfs.unlinkpath(standinorig)
539 wvfs.unlinkpath(standinorig)
541 expecthash = lfutil.readasstandin(wctx[standin])
540 expecthash = lfutil.readasstandin(wctx[standin])
542 if expecthash != b'':
541 if expecthash != b'':
543 if lfile not in wctx: # not switched to normal file
542 if lfile not in wctx: # not switched to normal file
544 if repo.dirstate[standin] != b'?':
543 if repo.dirstate[standin] != b'?':
545 wvfs.unlinkpath(lfile, ignoremissing=True)
544 wvfs.unlinkpath(lfile, ignoremissing=True)
546 else:
545 else:
547 dropped.add(lfile)
546 dropped.add(lfile)
548
547
549 # use normallookup() to allocate an entry in largefiles
548 # use normallookup() to allocate an entry in largefiles
550 # dirstate to prevent lfilesrepo.status() from reporting
549 # dirstate to prevent lfilesrepo.status() from reporting
551 # missing files as removed.
550 # missing files as removed.
552 lfdirstate.normallookup(lfile)
551 lfdirstate.normallookup(lfile)
553 update[lfile] = expecthash
552 update[lfile] = expecthash
554 else:
553 else:
555 # Remove lfiles for which the standin is deleted, unless the
554 # Remove lfiles for which the standin is deleted, unless the
556 # lfile is added to the repository again. This happens when a
555 # lfile is added to the repository again. This happens when a
557 # largefile is converted back to a normal file: the standin
556 # largefile is converted back to a normal file: the standin
558 # disappears, but a new (normal) file appears as the lfile.
557 # disappears, but a new (normal) file appears as the lfile.
559 if (
558 if (
560 wvfs.exists(lfile)
559 wvfs.exists(lfile)
561 and repo.dirstate.normalize(lfile) not in wctx
560 and repo.dirstate.normalize(lfile) not in wctx
562 ):
561 ):
563 wvfs.unlinkpath(lfile)
562 wvfs.unlinkpath(lfile)
564 removed += 1
563 removed += 1
565
564
566 # largefile processing might be slow and be interrupted - be prepared
565 # largefile processing might be slow and be interrupted - be prepared
567 lfdirstate.write()
566 lfdirstate.write()
568
567
569 if lfiles:
568 if lfiles:
570 lfiles = [f for f in lfiles if f not in dropped]
569 lfiles = [f for f in lfiles if f not in dropped]
571
570
572 for f in dropped:
571 for f in dropped:
573 repo.wvfs.unlinkpath(lfutil.standin(f))
572 repo.wvfs.unlinkpath(lfutil.standin(f))
574
573
575 # This needs to happen for dropped files, otherwise they stay in
574 # This needs to happen for dropped files, otherwise they stay in
576 # the M state.
575 # the M state.
577 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
576 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
578
577
579 statuswriter(_(b'getting changed largefiles\n'))
578 statuswriter(_(b'getting changed largefiles\n'))
580 cachelfiles(ui, repo, None, lfiles)
579 cachelfiles(ui, repo, None, lfiles)
581
580
582 for lfile in lfiles:
581 for lfile in lfiles:
583 update1 = 0
582 update1 = 0
584
583
585 expecthash = update.get(lfile)
584 expecthash = update.get(lfile)
586 if expecthash:
585 if expecthash:
587 if not lfutil.copyfromcache(repo, expecthash, lfile):
586 if not lfutil.copyfromcache(repo, expecthash, lfile):
588 # failed ... but already removed and set to normallookup
587 # failed ... but already removed and set to normallookup
589 continue
588 continue
590 # Synchronize largefile dirstate to the last modified
589 # Synchronize largefile dirstate to the last modified
591 # time of the file
590 # time of the file
592 lfdirstate.normal(lfile)
591 lfdirstate.normal(lfile)
593 update1 = 1
592 update1 = 1
594
593
595 # copy the exec mode of largefile standin from the repository's
594 # copy the exec mode of largefile standin from the repository's
596 # dirstate to its state in the lfdirstate.
595 # dirstate to its state in the lfdirstate.
597 standin = lfutil.standin(lfile)
596 standin = lfutil.standin(lfile)
598 if wvfs.exists(standin):
597 if wvfs.exists(standin):
599 # exec is decided by the users permissions using mask 0o100
598 # exec is decided by the users permissions using mask 0o100
600 standinexec = wvfs.stat(standin).st_mode & 0o100
599 standinexec = wvfs.stat(standin).st_mode & 0o100
601 st = wvfs.stat(lfile)
600 st = wvfs.stat(lfile)
602 mode = st.st_mode
601 mode = st.st_mode
603 if standinexec != mode & 0o100:
602 if standinexec != mode & 0o100:
604 # first remove all X bits, then shift all R bits to X
603 # first remove all X bits, then shift all R bits to X
605 mode &= ~0o111
604 mode &= ~0o111
606 if standinexec:
605 if standinexec:
607 mode |= (mode >> 2) & 0o111 & ~util.umask
606 mode |= (mode >> 2) & 0o111 & ~util.umask
608 wvfs.chmod(lfile, mode)
607 wvfs.chmod(lfile, mode)
609 update1 = 1
608 update1 = 1
610
609
611 updated += update1
610 updated += update1
612
611
613 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
612 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
614
613
615 lfdirstate.write()
614 lfdirstate.write()
616 if lfiles:
615 if lfiles:
617 statuswriter(
616 statuswriter(
618 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
617 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
619 )
618 )
620
619
621
620
622 @eh.command(
621 @eh.command(
623 b'lfpull',
622 b'lfpull',
624 [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
623 [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
625 + cmdutil.remoteopts,
624 + cmdutil.remoteopts,
626 _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
625 _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
627 )
626 )
628 def lfpull(ui, repo, source=b"default", **opts):
627 def lfpull(ui, repo, source=b"default", **opts):
629 """pull largefiles for the specified revisions from the specified source
628 """pull largefiles for the specified revisions from the specified source
630
629
631 Pull largefiles that are referenced from local changesets but missing
630 Pull largefiles that are referenced from local changesets but missing
632 locally, pulling from a remote repository to the local cache.
631 locally, pulling from a remote repository to the local cache.
633
632
634 If SOURCE is omitted, the 'default' path will be used.
633 If SOURCE is omitted, the 'default' path will be used.
635 See :hg:`help urls` for more information.
634 See :hg:`help urls` for more information.
636
635
637 .. container:: verbose
636 .. container:: verbose
638
637
639 Some examples:
638 Some examples:
640
639
641 - pull largefiles for all branch heads::
640 - pull largefiles for all branch heads::
642
641
643 hg lfpull -r "head() and not closed()"
642 hg lfpull -r "head() and not closed()"
644
643
645 - pull largefiles on the default branch::
644 - pull largefiles on the default branch::
646
645
647 hg lfpull -r "branch(default)"
646 hg lfpull -r "branch(default)"
648 """
647 """
649 repo.lfpullsource = source
648 repo.lfpullsource = source
650
649
651 revs = opts.get('rev', [])
650 revs = opts.get('rev', [])
652 if not revs:
651 if not revs:
653 raise error.Abort(_(b'no revisions specified'))
652 raise error.Abort(_(b'no revisions specified'))
654 revs = scmutil.revrange(repo, revs)
653 revs = scmutil.revrange(repo, revs)
655
654
656 numcached = 0
655 numcached = 0
657 for rev in revs:
656 for rev in revs:
658 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
657 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
659 (cached, missing) = cachelfiles(ui, repo, rev)
658 (cached, missing) = cachelfiles(ui, repo, rev)
660 numcached += len(cached)
659 numcached += len(cached)
661 ui.status(_(b"%d largefiles cached\n") % numcached)
660 ui.status(_(b"%d largefiles cached\n") % numcached)
662
661
663
662
664 @eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
663 @eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
665 def debuglfput(ui, repo, filepath, **kwargs):
664 def debuglfput(ui, repo, filepath, **kwargs):
666 hash = lfutil.hashfile(filepath)
665 hash = lfutil.hashfile(filepath)
667 storefactory.openstore(repo).put(filepath, hash)
666 storefactory.openstore(repo).put(filepath, hash)
668 ui.write(b'%s\n' % hash)
667 ui.write(b'%s\n' % hash)
669 return 0
668 return 0
@@ -1,1828 +1,1828 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18
18
19 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
20
20
21 from mercurial import (
21 from mercurial import (
22 archival,
22 archival,
23 cmdutil,
23 cmdutil,
24 copies as copiesmod,
24 copies as copiesmod,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 exthelper,
28 exthelper,
29 filemerge,
29 filemerge,
30 hg,
30 hg,
31 logcmdutil,
31 logcmdutil,
32 match as matchmod,
32 match as matchmod,
33 merge,
33 merge,
34 mergestate as mergestatemod,
34 mergestate as mergestatemod,
35 pathutil,
35 pathutil,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 smartset,
38 smartset,
39 subrepo,
39 subrepo,
40 upgrade,
40 upgrade,
41 url as urlmod,
41 url as urlmod,
42 util,
42 util,
43 )
43 )
44
44
45 from . import (
45 from . import (
46 lfcommands,
46 lfcommands,
47 lfutil,
47 lfutil,
48 storefactory,
48 storefactory,
49 )
49 )
50
50
51 eh = exthelper.exthelper()
51 eh = exthelper.exthelper()
52
52
53 lfstatus = lfutil.lfstatus
53 lfstatus = lfutil.lfstatus
54
54
55 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
55 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
56
56
57 # -- Utility functions: commonly/repeatedly needed functionality ---------------
57 # -- Utility functions: commonly/repeatedly needed functionality ---------------
58
58
59
59
60 def composelargefilematcher(match, manifest):
60 def composelargefilematcher(match, manifest):
61 '''create a matcher that matches only the largefiles in the original
61 '''create a matcher that matches only the largefiles in the original
62 matcher'''
62 matcher'''
63 m = copy.copy(match)
63 m = copy.copy(match)
64 lfile = lambda f: lfutil.standin(f) in manifest
64 lfile = lambda f: lfutil.standin(f) in manifest
65 m._files = [lf for lf in m._files if lfile(lf)]
65 m._files = [lf for lf in m._files if lfile(lf)]
66 m._fileset = set(m._files)
66 m._fileset = set(m._files)
67 m.always = lambda: False
67 m.always = lambda: False
68 origmatchfn = m.matchfn
68 origmatchfn = m.matchfn
69 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
69 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
70 return m
70 return m
71
71
72
72
73 def composenormalfilematcher(match, manifest, exclude=None):
73 def composenormalfilematcher(match, manifest, exclude=None):
74 excluded = set()
74 excluded = set()
75 if exclude is not None:
75 if exclude is not None:
76 excluded.update(exclude)
76 excluded.update(exclude)
77
77
78 m = copy.copy(match)
78 m = copy.copy(match)
79 notlfile = lambda f: not (
79 notlfile = lambda f: not (
80 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
80 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
81 )
81 )
82 m._files = [lf for lf in m._files if notlfile(lf)]
82 m._files = [lf for lf in m._files if notlfile(lf)]
83 m._fileset = set(m._files)
83 m._fileset = set(m._files)
84 m.always = lambda: False
84 m.always = lambda: False
85 origmatchfn = m.matchfn
85 origmatchfn = m.matchfn
86 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
86 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
87 return m
87 return m
88
88
89
89
90 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
90 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
91 large = opts.get('large')
91 large = opts.get('large')
92 lfsize = lfutil.getminsize(
92 lfsize = lfutil.getminsize(
93 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
93 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
94 )
94 )
95
95
96 lfmatcher = None
96 lfmatcher = None
97 if lfutil.islfilesrepo(repo):
97 if lfutil.islfilesrepo(repo):
98 lfpats = ui.configlist(lfutil.longname, b'patterns')
98 lfpats = ui.configlist(lfutil.longname, b'patterns')
99 if lfpats:
99 if lfpats:
100 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
100 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
101
101
102 lfnames = []
102 lfnames = []
103 m = matcher
103 m = matcher
104
104
105 wctx = repo[None]
105 wctx = repo[None]
106 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
106 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
107 exact = m.exact(f)
107 exact = m.exact(f)
108 lfile = lfutil.standin(f) in wctx
108 lfile = lfutil.standin(f) in wctx
109 nfile = f in wctx
109 nfile = f in wctx
110 exists = lfile or nfile
110 exists = lfile or nfile
111
111
112 # Don't warn the user when they attempt to add a normal tracked file.
112 # Don't warn the user when they attempt to add a normal tracked file.
113 # The normal add code will do that for us.
113 # The normal add code will do that for us.
114 if exact and exists:
114 if exact and exists:
115 if lfile:
115 if lfile:
116 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
116 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
117 continue
117 continue
118
118
119 if (exact or not exists) and not lfutil.isstandin(f):
119 if (exact or not exists) and not lfutil.isstandin(f):
120 # In case the file was removed previously, but not committed
120 # In case the file was removed previously, but not committed
121 # (issue3507)
121 # (issue3507)
122 if not repo.wvfs.exists(f):
122 if not repo.wvfs.exists(f):
123 continue
123 continue
124
124
125 abovemin = (
125 abovemin = (
126 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
126 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
127 )
127 )
128 if large or abovemin or (lfmatcher and lfmatcher(f)):
128 if large or abovemin or (lfmatcher and lfmatcher(f)):
129 lfnames.append(f)
129 lfnames.append(f)
130 if ui.verbose or not exact:
130 if ui.verbose or not exact:
131 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
131 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
132
132
133 bad = []
133 bad = []
134
134
135 # Need to lock, otherwise there could be a race condition between
135 # Need to lock, otherwise there could be a race condition between
136 # when standins are created and added to the repo.
136 # when standins are created and added to the repo.
137 with repo.wlock():
137 with repo.wlock():
138 if not opts.get('dry_run'):
138 if not opts.get('dry_run'):
139 standins = []
139 standins = []
140 lfdirstate = lfutil.openlfdirstate(ui, repo)
140 lfdirstate = lfutil.openlfdirstate(ui, repo)
141 for f in lfnames:
141 for f in lfnames:
142 standinname = lfutil.standin(f)
142 standinname = lfutil.standin(f)
143 lfutil.writestandin(
143 lfutil.writestandin(
144 repo,
144 repo,
145 standinname,
145 standinname,
146 hash=b'',
146 hash=b'',
147 executable=lfutil.getexecutable(repo.wjoin(f)),
147 executable=lfutil.getexecutable(repo.wjoin(f)),
148 )
148 )
149 standins.append(standinname)
149 standins.append(standinname)
150 if lfdirstate[f] == b'r':
150 if lfdirstate[f] == b'r':
151 lfdirstate.normallookup(f)
151 lfdirstate.normallookup(f)
152 else:
152 else:
153 lfdirstate.add(f)
153 lfdirstate.add(f)
154 lfdirstate.write()
154 lfdirstate.write()
155 bad += [
155 bad += [
156 lfutil.splitstandin(f)
156 lfutil.splitstandin(f)
157 for f in repo[None].add(standins)
157 for f in repo[None].add(standins)
158 if f in m.files()
158 if f in m.files()
159 ]
159 ]
160
160
161 added = [f for f in lfnames if f not in bad]
161 added = [f for f in lfnames if f not in bad]
162 return added, bad
162 return added, bad
163
163
164
164
165 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
165 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
166 after = opts.get('after')
166 after = opts.get('after')
167 m = composelargefilematcher(matcher, repo[None].manifest())
167 m = composelargefilematcher(matcher, repo[None].manifest())
168 with lfstatus(repo):
168 with lfstatus(repo):
169 s = repo.status(match=m, clean=not isaddremove)
169 s = repo.status(match=m, clean=not isaddremove)
170 manifest = repo[None].manifest()
170 manifest = repo[None].manifest()
171 modified, added, deleted, clean = [
171 modified, added, deleted, clean = [
172 [f for f in list if lfutil.standin(f) in manifest]
172 [f for f in list if lfutil.standin(f) in manifest]
173 for list in (s.modified, s.added, s.deleted, s.clean)
173 for list in (s.modified, s.added, s.deleted, s.clean)
174 ]
174 ]
175
175
176 def warn(files, msg):
176 def warn(files, msg):
177 for f in files:
177 for f in files:
178 ui.warn(msg % uipathfn(f))
178 ui.warn(msg % uipathfn(f))
179 return int(len(files) > 0)
179 return int(len(files) > 0)
180
180
181 if after:
181 if after:
182 remove = deleted
182 remove = deleted
183 result = warn(
183 result = warn(
184 modified + added + clean, _(b'not removing %s: file still exists\n')
184 modified + added + clean, _(b'not removing %s: file still exists\n')
185 )
185 )
186 else:
186 else:
187 remove = deleted + clean
187 remove = deleted + clean
188 result = warn(
188 result = warn(
189 modified,
189 modified,
190 _(
190 _(
191 b'not removing %s: file is modified (use -f'
191 b'not removing %s: file is modified (use -f'
192 b' to force removal)\n'
192 b' to force removal)\n'
193 ),
193 ),
194 )
194 )
195 result = (
195 result = (
196 warn(
196 warn(
197 added,
197 added,
198 _(
198 _(
199 b'not removing %s: file has been marked for add'
199 b'not removing %s: file has been marked for add'
200 b' (use forget to undo)\n'
200 b' (use forget to undo)\n'
201 ),
201 ),
202 )
202 )
203 or result
203 or result
204 )
204 )
205
205
206 # Need to lock because standin files are deleted then removed from the
206 # Need to lock because standin files are deleted then removed from the
207 # repository and we could race in-between.
207 # repository and we could race in-between.
208 with repo.wlock():
208 with repo.wlock():
209 lfdirstate = lfutil.openlfdirstate(ui, repo)
209 lfdirstate = lfutil.openlfdirstate(ui, repo)
210 for f in sorted(remove):
210 for f in sorted(remove):
211 if ui.verbose or not m.exact(f):
211 if ui.verbose or not m.exact(f):
212 ui.status(_(b'removing %s\n') % uipathfn(f))
212 ui.status(_(b'removing %s\n') % uipathfn(f))
213
213
214 if not dryrun:
214 if not dryrun:
215 if not after:
215 if not after:
216 repo.wvfs.unlinkpath(f, ignoremissing=True)
216 repo.wvfs.unlinkpath(f, ignoremissing=True)
217
217
218 if dryrun:
218 if dryrun:
219 return result
219 return result
220
220
221 remove = [lfutil.standin(f) for f in remove]
221 remove = [lfutil.standin(f) for f in remove]
222 # If this is being called by addremove, let the original addremove
222 # If this is being called by addremove, let the original addremove
223 # function handle this.
223 # function handle this.
224 if not isaddremove:
224 if not isaddremove:
225 for f in remove:
225 for f in remove:
226 repo.wvfs.unlinkpath(f, ignoremissing=True)
226 repo.wvfs.unlinkpath(f, ignoremissing=True)
227 repo[None].forget(remove)
227 repo[None].forget(remove)
228
228
229 for f in remove:
229 for f in remove:
230 lfutil.synclfdirstate(
230 lfutil.synclfdirstate(
231 repo, lfdirstate, lfutil.splitstandin(f), False
231 repo, lfdirstate, lfutil.splitstandin(f), False
232 )
232 )
233
233
234 lfdirstate.write()
234 lfdirstate.write()
235
235
236 return result
236 return result
237
237
238
238
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 # appear at their right place in the manifests.
240 # appear at their right place in the manifests.
241 @eh.wrapfunction(webcommands, b'decodepath')
241 @eh.wrapfunction(webcommands, b'decodepath')
242 def decodepath(orig, path):
242 def decodepath(orig, path):
243 return lfutil.splitstandin(path) or path
243 return lfutil.splitstandin(path) or path
244
244
245
245
246 # -- Wrappers: modify existing commands --------------------------------
246 # -- Wrappers: modify existing commands --------------------------------
247
247
248
248
249 @eh.wrapcommand(
249 @eh.wrapcommand(
250 b'add',
250 b'add',
251 opts=[
251 opts=[
252 (b'', b'large', None, _(b'add as largefile')),
252 (b'', b'large', None, _(b'add as largefile')),
253 (b'', b'normal', None, _(b'add as normal file')),
253 (b'', b'normal', None, _(b'add as normal file')),
254 (
254 (
255 b'',
255 b'',
256 b'lfsize',
256 b'lfsize',
257 b'',
257 b'',
258 _(
258 _(
259 b'add all files above this size (in megabytes) '
259 b'add all files above this size (in megabytes) '
260 b'as largefiles (default: 10)'
260 b'as largefiles (default: 10)'
261 ),
261 ),
262 ),
262 ),
263 ],
263 ],
264 )
264 )
265 def overrideadd(orig, ui, repo, *pats, **opts):
265 def overrideadd(orig, ui, repo, *pats, **opts):
266 if opts.get('normal') and opts.get('large'):
266 if opts.get('normal') and opts.get('large'):
267 raise error.Abort(_(b'--normal cannot be used with --large'))
267 raise error.Abort(_(b'--normal cannot be used with --large'))
268 return orig(ui, repo, *pats, **opts)
268 return orig(ui, repo, *pats, **opts)
269
269
270
270
271 @eh.wrapfunction(cmdutil, b'add')
271 @eh.wrapfunction(cmdutil, b'add')
272 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
272 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
273 # The --normal flag short circuits this override
273 # The --normal flag short circuits this override
274 if opts.get('normal'):
274 if opts.get('normal'):
275 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
275 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
276
276
277 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
277 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
278 normalmatcher = composenormalfilematcher(
278 normalmatcher = composenormalfilematcher(
279 matcher, repo[None].manifest(), ladded
279 matcher, repo[None].manifest(), ladded
280 )
280 )
281 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
281 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
282
282
283 bad.extend(f for f in lbad)
283 bad.extend(f for f in lbad)
284 return bad
284 return bad
285
285
286
286
287 @eh.wrapfunction(cmdutil, b'remove')
287 @eh.wrapfunction(cmdutil, b'remove')
288 def cmdutilremove(
288 def cmdutilremove(
289 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
289 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
290 ):
290 ):
291 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
291 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
292 result = orig(
292 result = orig(
293 ui,
293 ui,
294 repo,
294 repo,
295 normalmatcher,
295 normalmatcher,
296 prefix,
296 prefix,
297 uipathfn,
297 uipathfn,
298 after,
298 after,
299 force,
299 force,
300 subrepos,
300 subrepos,
301 dryrun,
301 dryrun,
302 )
302 )
303 return (
303 return (
304 removelargefiles(
304 removelargefiles(
305 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
305 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
306 )
306 )
307 or result
307 or result
308 )
308 )
309
309
310
310
311 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
311 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
312 def overridestatusfn(orig, repo, rev2, **opts):
312 def overridestatusfn(orig, repo, rev2, **opts):
313 with lfstatus(repo._repo):
313 with lfstatus(repo._repo):
314 return orig(repo, rev2, **opts)
314 return orig(repo, rev2, **opts)
315
315
316
316
317 @eh.wrapcommand(b'status')
317 @eh.wrapcommand(b'status')
318 def overridestatus(orig, ui, repo, *pats, **opts):
318 def overridestatus(orig, ui, repo, *pats, **opts):
319 with lfstatus(repo):
319 with lfstatus(repo):
320 return orig(ui, repo, *pats, **opts)
320 return orig(ui, repo, *pats, **opts)
321
321
322
322
323 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
323 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
324 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
324 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
325 with lfstatus(repo._repo):
325 with lfstatus(repo._repo):
326 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
326 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
327
327
328
328
329 @eh.wrapcommand(b'log')
329 @eh.wrapcommand(b'log')
330 def overridelog(orig, ui, repo, *pats, **opts):
330 def overridelog(orig, ui, repo, *pats, **opts):
331 def overridematchandpats(
331 def overridematchandpats(
332 orig,
332 orig,
333 ctx,
333 ctx,
334 pats=(),
334 pats=(),
335 opts=None,
335 opts=None,
336 globbed=False,
336 globbed=False,
337 default=b'relpath',
337 default=b'relpath',
338 badfn=None,
338 badfn=None,
339 ):
339 ):
340 """Matcher that merges root directory with .hglf, suitable for log.
340 """Matcher that merges root directory with .hglf, suitable for log.
341 It is still possible to match .hglf directly.
341 It is still possible to match .hglf directly.
342 For any listed files run log on the standin too.
342 For any listed files run log on the standin too.
343 matchfn tries both the given filename and the filename with .hglf stripped.
343 matchfn tries both the given filename and the filename with .hglf stripped.
344 """
344 """
345 if opts is None:
345 if opts is None:
346 opts = {}
346 opts = {}
347 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
347 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
348 m, p = copy.copy(matchandpats)
348 m, p = copy.copy(matchandpats)
349
349
350 if m.always():
350 if m.always():
351 # We want to match everything anyway, so there's no benefit trying
351 # We want to match everything anyway, so there's no benefit trying
352 # to add standins.
352 # to add standins.
353 return matchandpats
353 return matchandpats
354
354
355 pats = set(p)
355 pats = set(p)
356
356
357 def fixpats(pat, tostandin=lfutil.standin):
357 def fixpats(pat, tostandin=lfutil.standin):
358 if pat.startswith(b'set:'):
358 if pat.startswith(b'set:'):
359 return pat
359 return pat
360
360
361 kindpat = matchmod._patsplit(pat, None)
361 kindpat = matchmod._patsplit(pat, None)
362
362
363 if kindpat[0] is not None:
363 if kindpat[0] is not None:
364 return kindpat[0] + b':' + tostandin(kindpat[1])
364 return kindpat[0] + b':' + tostandin(kindpat[1])
365 return tostandin(kindpat[1])
365 return tostandin(kindpat[1])
366
366
367 cwd = repo.getcwd()
367 cwd = repo.getcwd()
368 if cwd:
368 if cwd:
369 hglf = lfutil.shortname
369 hglf = lfutil.shortname
370 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
370 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
371
371
372 def tostandin(f):
372 def tostandin(f):
373 # The file may already be a standin, so truncate the back
373 # The file may already be a standin, so truncate the back
374 # prefix and test before mangling it. This avoids turning
374 # prefix and test before mangling it. This avoids turning
375 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
375 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
376 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
376 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
377 return f
377 return f
378
378
379 # An absolute path is from outside the repo, so truncate the
379 # An absolute path is from outside the repo, so truncate the
380 # path to the root before building the standin. Otherwise cwd
380 # path to the root before building the standin. Otherwise cwd
381 # is somewhere in the repo, relative to root, and needs to be
381 # is somewhere in the repo, relative to root, and needs to be
382 # prepended before building the standin.
382 # prepended before building the standin.
383 if os.path.isabs(cwd):
383 if os.path.isabs(cwd):
384 f = f[len(back) :]
384 f = f[len(back) :]
385 else:
385 else:
386 f = cwd + b'/' + f
386 f = cwd + b'/' + f
387 return back + lfutil.standin(f)
387 return back + lfutil.standin(f)
388
388
389 else:
389 else:
390
390
391 def tostandin(f):
391 def tostandin(f):
392 if lfutil.isstandin(f):
392 if lfutil.isstandin(f):
393 return f
393 return f
394 return lfutil.standin(f)
394 return lfutil.standin(f)
395
395
396 pats.update(fixpats(f, tostandin) for f in p)
396 pats.update(fixpats(f, tostandin) for f in p)
397
397
398 for i in range(0, len(m._files)):
398 for i in range(0, len(m._files)):
399 # Don't add '.hglf' to m.files, since that is already covered by '.'
399 # Don't add '.hglf' to m.files, since that is already covered by '.'
400 if m._files[i] == b'.':
400 if m._files[i] == b'.':
401 continue
401 continue
402 standin = lfutil.standin(m._files[i])
402 standin = lfutil.standin(m._files[i])
403 # If the "standin" is a directory, append instead of replace to
403 # If the "standin" is a directory, append instead of replace to
404 # support naming a directory on the command line with only
404 # support naming a directory on the command line with only
405 # largefiles. The original directory is kept to support normal
405 # largefiles. The original directory is kept to support normal
406 # files.
406 # files.
407 if standin in ctx:
407 if standin in ctx:
408 m._files[i] = standin
408 m._files[i] = standin
409 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
409 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
410 m._files.append(standin)
410 m._files.append(standin)
411
411
412 m._fileset = set(m._files)
412 m._fileset = set(m._files)
413 m.always = lambda: False
413 m.always = lambda: False
414 origmatchfn = m.matchfn
414 origmatchfn = m.matchfn
415
415
416 def lfmatchfn(f):
416 def lfmatchfn(f):
417 lf = lfutil.splitstandin(f)
417 lf = lfutil.splitstandin(f)
418 if lf is not None and origmatchfn(lf):
418 if lf is not None and origmatchfn(lf):
419 return True
419 return True
420 r = origmatchfn(f)
420 r = origmatchfn(f)
421 return r
421 return r
422
422
423 m.matchfn = lfmatchfn
423 m.matchfn = lfmatchfn
424
424
425 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
425 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
426 return m, pats
426 return m, pats
427
427
428 # For hg log --patch, the match object is used in two different senses:
428 # For hg log --patch, the match object is used in two different senses:
429 # (1) to determine what revisions should be printed out, and
429 # (1) to determine what revisions should be printed out, and
430 # (2) to determine what files to print out diffs for.
430 # (2) to determine what files to print out diffs for.
431 # The magic matchandpats override should be used for case (1) but not for
431 # The magic matchandpats override should be used for case (1) but not for
432 # case (2).
432 # case (2).
433 oldmatchandpats = scmutil.matchandpats
433 oldmatchandpats = scmutil.matchandpats
434
434
435 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
435 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
436 wctx = repo[None]
436 wctx = repo[None]
437 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
437 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
438 return lambda ctx: match
438 return lambda ctx: match
439
439
440 wrappedmatchandpats = extensions.wrappedfunction(
440 wrappedmatchandpats = extensions.wrappedfunction(
441 scmutil, b'matchandpats', overridematchandpats
441 scmutil, b'matchandpats', overridematchandpats
442 )
442 )
443 wrappedmakefilematcher = extensions.wrappedfunction(
443 wrappedmakefilematcher = extensions.wrappedfunction(
444 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
444 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
445 )
445 )
446 with wrappedmatchandpats, wrappedmakefilematcher:
446 with wrappedmatchandpats, wrappedmakefilematcher:
447 return orig(ui, repo, *pats, **opts)
447 return orig(ui, repo, *pats, **opts)
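
# Illustrative sketch (not part of this module): a simplified model of the
# pattern rewriting performed by overridematchandpats/fixpats above. The real
# code uses lfutil.standin() and matchmod._patsplit(); the toy_* helpers below
# are hypothetical and only assume the '.hglf/' standin prefix convention.

SHORTNAME = '.hglf'

def toy_standin(path):
    # map a largefile path to its standin path under .hglf/
    return SHORTNAME + '/' + path

def toy_fixpat(pat):
    # leave filesets untouched; rewrite 'kind:pattern' so the pattern part
    # points into the standin directory
    if pat.startswith('set:'):
        return pat
    kind, sep, rest = pat.partition(':')
    if sep and kind in ('glob', 'path', 'relpath', 're'):
        return kind + ':' + toy_standin(rest)
    return toy_standin(pat)

if __name__ == '__main__':
    print(toy_fixpat('glob:*.bin'))    # glob:.hglf/*.bin
    print(toy_fixpat('set:binary()'))  # set:binary() (left alone)
    print(toy_fixpat('data/big.iso'))  # .hglf/data/big.iso
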
448
448
449
449
450 @eh.wrapcommand(
450 @eh.wrapcommand(
451 b'verify',
451 b'verify',
452 opts=[
452 opts=[
453 (
453 (
454 b'',
454 b'',
455 b'large',
455 b'large',
456 None,
456 None,
457 _(b'verify that all largefiles in current revision exist'),
457 _(b'verify that all largefiles in current revision exist'),
458 ),
458 ),
459 (
459 (
460 b'',
460 b'',
461 b'lfa',
461 b'lfa',
462 None,
462 None,
463 _(b'verify largefiles in all revisions, not just current'),
463 _(b'verify largefiles in all revisions, not just current'),
464 ),
464 ),
465 (
465 (
466 b'',
466 b'',
467 b'lfc',
467 b'lfc',
468 None,
468 None,
469 _(b'verify local largefile contents, not just existence'),
469 _(b'verify local largefile contents, not just existence'),
470 ),
470 ),
471 ],
471 ],
472 )
472 )
473 def overrideverify(orig, ui, repo, *pats, **opts):
473 def overrideverify(orig, ui, repo, *pats, **opts):
474 large = opts.pop('large', False)
474 large = opts.pop('large', False)
475 all = opts.pop('lfa', False)
475 all = opts.pop('lfa', False)
476 contents = opts.pop('lfc', False)
476 contents = opts.pop('lfc', False)
477
477
478 result = orig(ui, repo, *pats, **opts)
478 result = orig(ui, repo, *pats, **opts)
479 if large or all or contents:
479 if large or all or contents:
480 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
480 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
481 return result
481 return result
482
482
483
483
484 @eh.wrapcommand(
484 @eh.wrapcommand(
485 b'debugstate',
485 b'debugstate',
486 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
486 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
487 )
487 )
488 def overridedebugstate(orig, ui, repo, *pats, **opts):
488 def overridedebugstate(orig, ui, repo, *pats, **opts):
489 large = opts.pop('large', False)
489 large = opts.pop('large', False)
490 if large:
490 if large:
491
491
492 class fakerepo(object):
492 class fakerepo(object):
493 dirstate = lfutil.openlfdirstate(ui, repo)
493 dirstate = lfutil.openlfdirstate(ui, repo)
494
494
495 orig(ui, fakerepo, *pats, **opts)
495 orig(ui, fakerepo, *pats, **opts)
496 else:
496 else:
497 orig(ui, repo, *pats, **opts)
497 orig(ui, repo, *pats, **opts)
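
# Illustrative sketch (not part of this module): the fakerepo trick above is
# plain duck typing -- the wrapped command only reads repo.dirstate, so any
# object exposing that attribute will do. The Toy* classes are hypothetical.

class ToyDirstate(object):
    def __init__(self, entries):
        self._entries = entries

    def items(self):
        return self._entries.items()

class ToyFakeRepo(object):
    # only the attribute the caller actually touches needs to exist
    dirstate = ToyDirstate({'big.dat': 'n'})

def toy_debugstate(repo):
    # stands in for a command that walks repo.dirstate
    for path, state in sorted(repo.dirstate.items()):
        print('%s %s' % (state, path))

if __name__ == '__main__':
    toy_debugstate(ToyFakeRepo())  # prints: n big.dat
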
498
498
499
499
500 # Before starting the manifest merge, merge.updates will call
500 # Before starting the manifest merge, merge.updates will call
501 # _checkunknownfile to check if there are any files in the merged-in
501 # _checkunknownfile to check if there are any files in the merged-in
502 # changeset that collide with unknown files in the working copy.
502 # changeset that collide with unknown files in the working copy.
503 #
503 #
504 # The largefiles are seen as unknown, so this prevents us from merging
504 # The largefiles are seen as unknown, so this prevents us from merging
505 # in a file 'foo' if we already have a largefile with the same name.
505 # in a file 'foo' if we already have a largefile with the same name.
506 #
506 #
507 # The overridden function filters the unknown files by removing any
507 # The overridden function filters the unknown files by removing any
508 # largefiles. This makes the merge proceed and we can then handle this
508 # largefiles. This makes the merge proceed and we can then handle this
509 # case further in the overridden calculateupdates function below.
509 # case further in the overridden calculateupdates function below.
510 @eh.wrapfunction(merge, b'_checkunknownfile')
510 @eh.wrapfunction(merge, b'_checkunknownfile')
511 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
511 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
512 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
512 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
513 return False
513 return False
514 return origfn(repo, wctx, mctx, f, f2)
514 return origfn(repo, wctx, mctx, f, f2)
515
515
516
516
517 # The manifest merge handles conflicts on the manifest level. We want
517 # The manifest merge handles conflicts on the manifest level. We want
518 # to handle changes in largefile-ness of files at this level too.
518 # to handle changes in largefile-ness of files at this level too.
519 #
519 #
520 # The strategy is to run the original calculateupdates and then process
520 # The strategy is to run the original calculateupdates and then process
521 # the action list it outputs. There are two cases we need to deal with:
521 # the action list it outputs. There are two cases we need to deal with:
522 #
522 #
523 # 1. Normal file in p1, largefile in p2. Here the largefile is
523 # 1. Normal file in p1, largefile in p2. Here the largefile is
524 # detected via its standin file, which will enter the working copy
524 # detected via its standin file, which will enter the working copy
525 # with a "get" action. It is not "merge" since the standin is all
525 # with a "get" action. It is not "merge" since the standin is all
526 # Mercurial is concerned with at this level -- the link to the
526 # Mercurial is concerned with at this level -- the link to the
527 # existing normal file is not relevant here.
527 # existing normal file is not relevant here.
528 #
528 #
529 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
529 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
530 # since the largefile will be present in the working copy and
530 # since the largefile will be present in the working copy and
531 # different from the normal file in p2. Mercurial therefore
531 # different from the normal file in p2. Mercurial therefore
532 # triggers a merge action.
532 # triggers a merge action.
533 #
533 #
534 # In both cases, we prompt the user and emit new actions to either
534 # In both cases, we prompt the user and emit new actions to either
535 # remove the standin (if the normal file was kept) or to remove the
535 # remove the standin (if the normal file was kept) or to remove the
536 # normal file and get the standin (if the largefile was kept). The
536 # normal file and get the standin (if the largefile was kept). The
537 # default prompt answer is to use the largefile version since it was
537 # default prompt answer is to use the largefile version since it was
538 # presumably changed on purpose.
538 # presumably changed on purpose.
539 #
539 #
540 # Finally, the merge.applyupdates function will then take care of
540 # Finally, the merge.applyupdates function will then take care of
541 # writing the files into the working copy and lfcommands.updatelfiles
541 # writing the files into the working copy and lfcommands.updatelfiles
542 # will update the largefiles.
542 # will update the largefiles.
543 @eh.wrapfunction(merge, b'calculateupdates')
543 @eh.wrapfunction(merge, b'calculateupdates')
544 def overridecalculateupdates(
544 def overridecalculateupdates(
545 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
545 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
546 ):
546 ):
547 overwrite = force and not branchmerge
547 overwrite = force and not branchmerge
548 mresult = origfn(
548 mresult = origfn(
549 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
549 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
550 )
550 )
551
551
552 if overwrite:
552 if overwrite:
553 return mresult
553 return mresult
554
554
555 # Convert to dictionary with filename as key and action as value.
555 # Convert to dictionary with filename as key and action as value.
556 lfiles = set()
556 lfiles = set()
557 for f in mresult.files():
557 for f in mresult.files():
558 splitstandin = lfutil.splitstandin(f)
558 splitstandin = lfutil.splitstandin(f)
559 if splitstandin is not None and splitstandin in p1:
559 if splitstandin is not None and splitstandin in p1:
560 lfiles.add(splitstandin)
560 lfiles.add(splitstandin)
561 elif lfutil.standin(f) in p1:
561 elif lfutil.standin(f) in p1:
562 lfiles.add(f)
562 lfiles.add(f)
563
563
564 for lfile in sorted(lfiles):
564 for lfile in sorted(lfiles):
565 standin = lfutil.standin(lfile)
565 standin = lfutil.standin(lfile)
566 (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
566 (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
567 (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
567 (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
568 if sm in (b'g', b'dc') and lm != b'r':
568 if sm in (b'g', b'dc') and lm != b'r':
569 if sm == b'dc':
569 if sm == b'dc':
570 f1, f2, fa, move, anc = sargs
570 f1, f2, fa, move, anc = sargs
571 sargs = (p2[f2].flags(), False)
571 sargs = (p2[f2].flags(), False)
572 # Case 1: normal file in the working copy, largefile in
572 # Case 1: normal file in the working copy, largefile in
573 # the second parent
573 # the second parent
574 usermsg = (
574 usermsg = (
575 _(
575 _(
576 b'remote turned local normal file %s into a largefile\n'
576 b'remote turned local normal file %s into a largefile\n'
577 b'use (l)argefile or keep (n)ormal file?'
577 b'use (l)argefile or keep (n)ormal file?'
578 b'$$ &Largefile $$ &Normal file'
578 b'$$ &Largefile $$ &Normal file'
579 )
579 )
580 % lfile
580 % lfile
581 )
581 )
582 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
582 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
583 mresult.addfile(lfile, b'r', None, b'replaced by standin')
583 mresult.addfile(lfile, b'r', None, b'replaced by standin')
584 mresult.addfile(standin, b'g', sargs, b'replaces standin')
584 mresult.addfile(standin, b'g', sargs, b'replaces standin')
585 else: # keep local normal file
585 else: # keep local normal file
586 mresult.addfile(lfile, b'k', None, b'replaces standin')
586 mresult.addfile(lfile, b'k', None, b'replaces standin')
587 if branchmerge:
587 if branchmerge:
588 mresult.addfile(
588 mresult.addfile(
589 standin, b'k', None, b'replaced by non-standin',
589 standin, b'k', None, b'replaced by non-standin',
590 )
590 )
591 else:
591 else:
592 mresult.addfile(
592 mresult.addfile(
593 standin, b'r', None, b'replaced by non-standin',
593 standin, b'r', None, b'replaced by non-standin',
594 )
594 )
595 elif lm in (b'g', b'dc') and sm != b'r':
595 elif lm in (b'g', b'dc') and sm != b'r':
596 if lm == b'dc':
596 if lm == b'dc':
597 f1, f2, fa, move, anc = largs
597 f1, f2, fa, move, anc = largs
598 largs = (p2[f2].flags(), False)
598 largs = (p2[f2].flags(), False)
599 # Case 2: largefile in the working copy, normal file in
599 # Case 2: largefile in the working copy, normal file in
600 # the second parent
600 # the second parent
601 usermsg = (
601 usermsg = (
602 _(
602 _(
603 b'remote turned local largefile %s into a normal file\n'
603 b'remote turned local largefile %s into a normal file\n'
604 b'keep (l)argefile or use (n)ormal file?'
604 b'keep (l)argefile or use (n)ormal file?'
605 b'$$ &Largefile $$ &Normal file'
605 b'$$ &Largefile $$ &Normal file'
606 )
606 )
607 % lfile
607 % lfile
608 )
608 )
609 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
609 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
610 if branchmerge:
610 if branchmerge:
611 # largefile can be restored from standin safely
611 # largefile can be restored from standin safely
612 mresult.addfile(
612 mresult.addfile(
613 lfile, b'k', None, b'replaced by standin',
613 lfile, b'k', None, b'replaced by standin',
614 )
614 )
615 mresult.addfile(standin, b'k', None, b'replaces standin')
615 mresult.addfile(standin, b'k', None, b'replaces standin')
616 else:
616 else:
617 # "lfile" should be marked as "removed" without
617 # "lfile" should be marked as "removed" without
618 # removal of itself
618 # removal of itself
619 mresult.addfile(
619 mresult.addfile(
620 lfile,
620 lfile,
621 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
621 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
622 None,
622 None,
623 b'forget non-standin largefile',
623 b'forget non-standin largefile',
624 )
624 )
625
625
626 # linear-merge should treat this largefile as 're-added'
626 # linear-merge should treat this largefile as 're-added'
627 mresult.addfile(standin, b'a', None, b'keep standin')
627 mresult.addfile(standin, b'a', None, b'keep standin')
628 else: # pick remote normal file
628 else: # pick remote normal file
629 mresult.addfile(lfile, b'g', largs, b'replaces standin')
629 mresult.addfile(lfile, b'g', largs, b'replaces standin')
630 mresult.addfile(
630 mresult.addfile(
631 standin, b'r', None, b'replaced by non-standin',
631 standin, b'r', None, b'replaced by non-standin',
632 )
632 )
633
633
634 return mresult
634 return mresult
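
# Illustrative sketch (not part of this module): a schematic of the two cases
# described in the comment above, reduced to a plain {path: action} dict.
# 'g' = get from the other side, 'r' = remove, 'k' = keep. The user prompt is
# replaced by a boolean so the example stays self-contained; toy_resolve is a
# hypothetical helper, not the real merge-action API.

def toy_resolve(actions, lfile, standin, prefer_largefile=True):
    lm = actions.get(lfile)
    sm = actions.get(standin)
    if sm == 'g' and lm != 'r':
        # case 1: remote turned a normal file into a largefile
        if prefer_largefile:
            actions[lfile] = 'r'    # drop the normal file ...
            actions[standin] = 'g'  # ... and fetch the standin
        else:
            actions[lfile] = 'k'
            actions[standin] = 'r'
    elif lm == 'g' and sm != 'r':
        # case 2: remote turned a largefile into a normal file
        if prefer_largefile:
            actions[lfile] = 'k'
            actions[standin] = 'k'
        else:
            actions[lfile] = 'g'    # take the normal file from remote
            actions[standin] = 'r'  # and remove the standin
    return actions

if __name__ == '__main__':
    acts = {'big.dat': None, '.hglf/big.dat': 'g'}
    print(toy_resolve(acts, 'big.dat', '.hglf/big.dat'))
    # {'big.dat': 'r', '.hglf/big.dat': 'g'}
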
635
635
636
636
637 @eh.wrapfunction(mergestatemod, b'recordupdates')
637 @eh.wrapfunction(mergestatemod, b'recordupdates')
638 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
638 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
639 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
639 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
640 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
640 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
641 for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
641 for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
642 # this should be executed before 'orig', to execute 'remove'
642 # this should be executed before 'orig', to execute 'remove'
643 # before all other actions
643 # before all other actions
644 repo.dirstate.remove(lfile)
644 repo.dirstate.remove(lfile)
645 # make sure lfile doesn't get synclfdirstate'd as normal
645 # make sure lfile doesn't get synclfdirstate'd as normal
646 lfdirstate.add(lfile)
646 lfdirstate.add(lfile)
647 lfdirstate.write()
647 lfdirstate.write()
648
648
649 return orig(repo, actions, branchmerge, getfiledata)
649 return orig(repo, actions, branchmerge, getfiledata)
650
650
651
651
652 # Override filemerge to prompt the user about how they wish to merge
652 # Override filemerge to prompt the user about how they wish to merge
653 # largefiles. This will handle identical edits without prompting the user.
653 # largefiles. This will handle identical edits without prompting the user.
654 @eh.wrapfunction(filemerge, b'_filemerge')
654 @eh.wrapfunction(filemerge, b'_filemerge')
655 def overridefilemerge(
655 def overridefilemerge(
656 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
656 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
657 ):
657 ):
658 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
658 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
659 return origfn(
659 return origfn(
660 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
660 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
661 )
661 )
662
662
663 ahash = lfutil.readasstandin(fca).lower()
663 ahash = lfutil.readasstandin(fca).lower()
664 dhash = lfutil.readasstandin(fcd).lower()
664 dhash = lfutil.readasstandin(fcd).lower()
665 ohash = lfutil.readasstandin(fco).lower()
665 ohash = lfutil.readasstandin(fco).lower()
666 if (
666 if (
667 ohash != ahash
667 ohash != ahash
668 and ohash != dhash
668 and ohash != dhash
669 and (
669 and (
670 dhash == ahash
670 dhash == ahash
671 or repo.ui.promptchoice(
671 or repo.ui.promptchoice(
672 _(
672 _(
673 b'largefile %s has a merge conflict\nancestor was %s\n'
673 b'largefile %s has a merge conflict\nancestor was %s\n'
674 b'you can keep (l)ocal %s or take (o)ther %s.\n'
674 b'you can keep (l)ocal %s or take (o)ther %s.\n'
675 b'what do you want to do?'
675 b'what do you want to do?'
676 b'$$ &Local $$ &Other'
676 b'$$ &Local $$ &Other'
677 )
677 )
678 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
678 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
679 0,
679 0,
680 )
680 )
681 == 1
681 == 1
682 )
682 )
683 ):
683 ):
684 repo.wwrite(fcd.path(), fco.data(), fco.flags())
684 repo.wwrite(fcd.path(), fco.data(), fco.flags())
685 return True, 0, False
685 return True, 0, False
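
# Illustrative sketch (not part of this module): the auto-resolution rule used
# above, expressed over the three standin hashes (ancestor, local, other).
# toy_can_keep_local is a hypothetical helper; it returns True when the local
# largefile can be kept without prompting, False when the other side's version
# should be taken, and None when a real conflict requires a prompt.

def toy_can_keep_local(ancestor_hash, local_hash, other_hash):
    if other_hash == ancestor_hash or other_hash == local_hash:
        # the other side did not change it, or both sides made the same edit
        return True
    if local_hash == ancestor_hash:
        # only the other side changed it: take theirs, no prompt needed
        return False
    return None  # both sides changed it differently: prompt the user

if __name__ == '__main__':
    print(toy_can_keep_local('aaa', 'bbb', 'aaa'))  # True  (theirs unchanged)
    print(toy_can_keep_local('aaa', 'bbb', 'bbb'))  # True  (identical edits)
    print(toy_can_keep_local('aaa', 'aaa', 'ccc'))  # False (only theirs changed)
    print(toy_can_keep_local('aaa', 'bbb', 'ccc'))  # None  (prompt)
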
686
686
687
687
688 @eh.wrapfunction(copiesmod, b'pathcopies')
688 @eh.wrapfunction(copiesmod, b'pathcopies')
689 def copiespathcopies(orig, ctx1, ctx2, match=None):
689 def copiespathcopies(orig, ctx1, ctx2, match=None):
690 copies = orig(ctx1, ctx2, match=match)
690 copies = orig(ctx1, ctx2, match=match)
691 updated = {}
691 updated = {}
692
692
693 for k, v in pycompat.iteritems(copies):
693 for k, v in pycompat.iteritems(copies):
694 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
694 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
695
695
696 return updated
696 return updated
697
697
698
698
699 # Copy first changes the matchers to match standins instead of
699 # Copy first changes the matchers to match standins instead of
700 # largefiles. Then it overrides util.copyfile; in that override it
700 # largefiles. Then it overrides util.copyfile; in that override it
701 # checks whether the destination largefile already exists. It also keeps a
701 # checks whether the destination largefile already exists. It also keeps a
702 # list of copied files so that the largefiles can be copied and the
702 # list of copied files so that the largefiles can be copied and the
703 # dirstate updated.
703 # dirstate updated.
704 @eh.wrapfunction(cmdutil, b'copy')
704 @eh.wrapfunction(cmdutil, b'copy')
705 def overridecopy(orig, ui, repo, pats, opts, rename=False):
705 def overridecopy(orig, ui, repo, pats, opts, rename=False):
706 # doesn't remove largefile on rename
706 # doesn't remove largefile on rename
707 if len(pats) < 2:
707 if len(pats) < 2:
708 # this isn't legal, let the original function deal with it
708 # this isn't legal, let the original function deal with it
709 return orig(ui, repo, pats, opts, rename)
709 return orig(ui, repo, pats, opts, rename)
710
710
711 # This could copy both lfiles and normal files in one command,
711 # This could copy both lfiles and normal files in one command,
712 # but we don't want to do that. First replace their matcher to
712 # but we don't want to do that. First replace their matcher to
713 # only match normal files and run it, then replace it to just
713 # only match normal files and run it, then replace it to just
714 # match largefiles and run it again.
714 # match largefiles and run it again.
715 nonormalfiles = False
715 nonormalfiles = False
716 nolfiles = False
716 nolfiles = False
717 manifest = repo[None].manifest()
717 manifest = repo[None].manifest()
718
718
719 def normalfilesmatchfn(
719 def normalfilesmatchfn(
720 orig,
720 orig,
721 ctx,
721 ctx,
722 pats=(),
722 pats=(),
723 opts=None,
723 opts=None,
724 globbed=False,
724 globbed=False,
725 default=b'relpath',
725 default=b'relpath',
726 badfn=None,
726 badfn=None,
727 ):
727 ):
728 if opts is None:
728 if opts is None:
729 opts = {}
729 opts = {}
730 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
730 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
731 return composenormalfilematcher(match, manifest)
731 return composenormalfilematcher(match, manifest)
732
732
733 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
733 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
734 try:
734 try:
735 result = orig(ui, repo, pats, opts, rename)
735 result = orig(ui, repo, pats, opts, rename)
736 except error.Abort as e:
736 except error.Abort as e:
737 if pycompat.bytestr(e) != _(b'no files to copy'):
737 if pycompat.bytestr(e) != _(b'no files to copy'):
738 raise e
738 raise e
739 else:
739 else:
740 nonormalfiles = True
740 nonormalfiles = True
741 result = 0
741 result = 0
742
742
743 # The first rename can cause our current working directory to be removed.
743 # The first rename can cause our current working directory to be removed.
744 # In that case there is nothing left to copy/rename so just quit.
744 # In that case there is nothing left to copy/rename so just quit.
745 try:
745 try:
746 repo.getcwd()
746 repo.getcwd()
747 except OSError:
747 except OSError:
748 return result
748 return result
749
749
750 def makestandin(relpath):
750 def makestandin(relpath):
751 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
751 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
752 return repo.wvfs.join(lfutil.standin(path))
752 return repo.wvfs.join(lfutil.standin(path))
753
753
754 fullpats = scmutil.expandpats(pats)
754 fullpats = scmutil.expandpats(pats)
755 dest = fullpats[-1]
755 dest = fullpats[-1]
756
756
757 if os.path.isdir(dest):
757 if os.path.isdir(dest):
758 if not os.path.isdir(makestandin(dest)):
758 if not os.path.isdir(makestandin(dest)):
759 os.makedirs(makestandin(dest))
759 os.makedirs(makestandin(dest))
760
760
761 try:
761 try:
762 # When we call orig below it creates the standins but we don't add
762 # When we call orig below it creates the standins but we don't add
763 # them to the dir state until later so lock during that time.
763 # them to the dir state until later so lock during that time.
764 wlock = repo.wlock()
764 wlock = repo.wlock()
765
765
766 manifest = repo[None].manifest()
766 manifest = repo[None].manifest()
767
767
768 def overridematch(
768 def overridematch(
769 orig,
769 orig,
770 ctx,
770 ctx,
771 pats=(),
771 pats=(),
772 opts=None,
772 opts=None,
773 globbed=False,
773 globbed=False,
774 default=b'relpath',
774 default=b'relpath',
775 badfn=None,
775 badfn=None,
776 ):
776 ):
777 if opts is None:
777 if opts is None:
778 opts = {}
778 opts = {}
779 newpats = []
779 newpats = []
780 # The patterns were previously mangled to add the standin
780 # The patterns were previously mangled to add the standin
781 # directory; we need to remove that now
781 # directory; we need to remove that now
782 for pat in pats:
782 for pat in pats:
783 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
783 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
784 newpats.append(pat.replace(lfutil.shortname, b''))
784 newpats.append(pat.replace(lfutil.shortname, b''))
785 else:
785 else:
786 newpats.append(pat)
786 newpats.append(pat)
787 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
787 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
788 m = copy.copy(match)
788 m = copy.copy(match)
789 lfile = lambda f: lfutil.standin(f) in manifest
789 lfile = lambda f: lfutil.standin(f) in manifest
790 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
790 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
791 m._fileset = set(m._files)
791 m._fileset = set(m._files)
792 origmatchfn = m.matchfn
792 origmatchfn = m.matchfn
793
793
794 def matchfn(f):
794 def matchfn(f):
795 lfile = lfutil.splitstandin(f)
795 lfile = lfutil.splitstandin(f)
796 return (
796 return (
797 lfile is not None
797 lfile is not None
798 and (f in manifest)
798 and (f in manifest)
799 and origmatchfn(lfile)
799 and origmatchfn(lfile)
800 or None
800 or None
801 )
801 )
802
802
803 m.matchfn = matchfn
803 m.matchfn = matchfn
804 return m
804 return m
805
805
806 listpats = []
806 listpats = []
807 for pat in pats:
807 for pat in pats:
808 if matchmod.patkind(pat) is not None:
808 if matchmod.patkind(pat) is not None:
809 listpats.append(pat)
809 listpats.append(pat)
810 else:
810 else:
811 listpats.append(makestandin(pat))
811 listpats.append(makestandin(pat))
812
812
813 copiedfiles = []
813 copiedfiles = []
814
814
815 def overridecopyfile(orig, src, dest, *args, **kwargs):
815 def overridecopyfile(orig, src, dest, *args, **kwargs):
816 if lfutil.shortname in src and dest.startswith(
816 if lfutil.shortname in src and dest.startswith(
817 repo.wjoin(lfutil.shortname)
817 repo.wjoin(lfutil.shortname)
818 ):
818 ):
819 destlfile = dest.replace(lfutil.shortname, b'')
819 destlfile = dest.replace(lfutil.shortname, b'')
820 if not opts[b'force'] and os.path.exists(destlfile):
820 if not opts[b'force'] and os.path.exists(destlfile):
821 raise IOError(
821 raise IOError(
822 b'', _(b'destination largefile already exists')
822 b'', _(b'destination largefile already exists')
823 )
823 )
824 copiedfiles.append((src, dest))
824 copiedfiles.append((src, dest))
825 orig(src, dest, *args, **kwargs)
825 orig(src, dest, *args, **kwargs)
826
826
827 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
827 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
828 with extensions.wrappedfunction(scmutil, b'match', overridematch):
828 with extensions.wrappedfunction(scmutil, b'match', overridematch):
829 result += orig(ui, repo, listpats, opts, rename)
829 result += orig(ui, repo, listpats, opts, rename)
830
830
831 lfdirstate = lfutil.openlfdirstate(ui, repo)
831 lfdirstate = lfutil.openlfdirstate(ui, repo)
832 for (src, dest) in copiedfiles:
832 for (src, dest) in copiedfiles:
833 if lfutil.shortname in src and dest.startswith(
833 if lfutil.shortname in src and dest.startswith(
834 repo.wjoin(lfutil.shortname)
834 repo.wjoin(lfutil.shortname)
835 ):
835 ):
836 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
836 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
837 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
837 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
838 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
838 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
839 if not os.path.isdir(destlfiledir):
839 if not os.path.isdir(destlfiledir):
840 os.makedirs(destlfiledir)
840 os.makedirs(destlfiledir)
841 if rename:
841 if rename:
842 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
842 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
843
843
844 # The file is gone, but this deletes any empty parent
844 # The file is gone, but this deletes any empty parent
845 # directories as a side-effect.
845 # directories as a side-effect.
846 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
846 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
847 lfdirstate.remove(srclfile)
847 lfdirstate.remove(srclfile)
848 else:
848 else:
849 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
849 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
850
850
851 lfdirstate.add(destlfile)
851 lfdirstate.add(destlfile)
852 lfdirstate.write()
852 lfdirstate.write()
853 except error.Abort as e:
853 except error.Abort as e:
854 if pycompat.bytestr(e) != _(b'no files to copy'):
854 if pycompat.bytestr(e) != _(b'no files to copy'):
855 raise e
855 raise e
856 else:
856 else:
857 nolfiles = True
857 nolfiles = True
858 finally:
858 finally:
859 wlock.release()
859 wlock.release()
860
860
861 if nolfiles and nonormalfiles:
861 if nolfiles and nonormalfiles:
862 raise error.Abort(_(b'no files to copy'))
862 raise error.Abort(_(b'no files to copy'))
863
863
864 return result
864 return result
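
# Illustrative sketch (not part of this module): the guard applied by the
# overridecopyfile wrapper above, in isolation. toy_check_copy is hypothetical;
# the 'existing' set stands in for the os.path.exists() check on the
# destination largefile.

def toy_check_copy(dest_standin, existing, force=False):
    # only destinations under the standin directory are of interest
    if not dest_standin.startswith('.hglf/'):
        return 'not a largefile copy'
    dest_lfile = dest_standin[len('.hglf/'):]
    if dest_lfile in existing and not force:
        raise IOError('destination largefile already exists: %s' % dest_lfile)
    return 'ok'

if __name__ == '__main__':
    print(toy_check_copy('.hglf/big.dat', existing=set()))  # ok
    print(toy_check_copy('docs/readme', existing=set()))    # not a largefile copy
    try:
        toy_check_copy('.hglf/big.dat', existing={'big.dat'})
    except IOError as e:
        print(e)  # destination largefile already exists: big.dat
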
865
865
866
866
867 # When the user calls revert, we have to be careful to not revert any
867 # When the user calls revert, we have to be careful to not revert any
868 # changes to other largefiles accidentally. This means we have to keep
868 # changes to other largefiles accidentally. This means we have to keep
869 # track of the largefiles that are being reverted so we only pull down
869 # track of the largefiles that are being reverted so we only pull down
870 # the necessary largefiles.
870 # the necessary largefiles.
871 #
871 #
872 # Standins are only updated (to match the hash of largefiles) before
872 # Standins are only updated (to match the hash of largefiles) before
873 # commits. Update the standins then run the original revert, changing
873 # commits. Update the standins then run the original revert, changing
874 # the matcher to hit standins instead of largefiles. Based on the
874 # the matcher to hit standins instead of largefiles. Based on the
875 # resulting standins update the largefiles.
875 # resulting standins update the largefiles.
876 @eh.wrapfunction(cmdutil, b'revert')
876 @eh.wrapfunction(cmdutil, b'revert')
877 def overriderevert(orig, ui, repo, ctx, *pats, **opts):
877 def overriderevert(orig, ui, repo, ctx, *pats, **opts):
878 # Because we put the standins in a bad state (by updating them)
878 # Because we put the standins in a bad state (by updating them)
879 # and then return them to a correct state we need to lock to
879 # and then return them to a correct state we need to lock to
880 # prevent others from changing them in their incorrect state.
880 # prevent others from changing them in their incorrect state.
881 with repo.wlock():
881 with repo.wlock():
882 lfdirstate = lfutil.openlfdirstate(ui, repo)
882 lfdirstate = lfutil.openlfdirstate(ui, repo)
883 s = lfutil.lfdirstatestatus(lfdirstate, repo)
883 s = lfutil.lfdirstatestatus(lfdirstate, repo)
884 lfdirstate.write()
884 lfdirstate.write()
885 for lfile in s.modified:
885 for lfile in s.modified:
886 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
886 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
887 for lfile in s.deleted:
887 for lfile in s.deleted:
888 fstandin = lfutil.standin(lfile)
888 fstandin = lfutil.standin(lfile)
889 if repo.wvfs.exists(fstandin):
889 if repo.wvfs.exists(fstandin):
890 repo.wvfs.unlink(fstandin)
890 repo.wvfs.unlink(fstandin)
891
891
892 oldstandins = lfutil.getstandinsstate(repo)
892 oldstandins = lfutil.getstandinsstate(repo)
893
893
894 def overridematch(
894 def overridematch(
895 orig,
895 orig,
896 mctx,
896 mctx,
897 pats=(),
897 pats=(),
898 opts=None,
898 opts=None,
899 globbed=False,
899 globbed=False,
900 default=b'relpath',
900 default=b'relpath',
901 badfn=None,
901 badfn=None,
902 ):
902 ):
903 if opts is None:
903 if opts is None:
904 opts = {}
904 opts = {}
905 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
905 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
906 m = copy.copy(match)
906 m = copy.copy(match)
907
907
908 # revert supports recursing into subrepos, and though largefiles
908 # revert supports recursing into subrepos, and though largefiles
909 # currently doesn't work correctly in that case, this match is
909 # currently doesn't work correctly in that case, this match is
910 # called, so the lfdirstate above may not be the correct one for
910 # called, so the lfdirstate above may not be the correct one for
911 # this invocation of match.
911 # this invocation of match.
912 lfdirstate = lfutil.openlfdirstate(
912 lfdirstate = lfutil.openlfdirstate(
913 mctx.repo().ui, mctx.repo(), False
913 mctx.repo().ui, mctx.repo(), False
914 )
914 )
915
915
916 wctx = repo[None]
916 wctx = repo[None]
917 matchfiles = []
917 matchfiles = []
918 for f in m._files:
918 for f in m._files:
919 standin = lfutil.standin(f)
919 standin = lfutil.standin(f)
920 if standin in ctx or standin in mctx:
920 if standin in ctx or standin in mctx:
921 matchfiles.append(standin)
921 matchfiles.append(standin)
922 elif standin in wctx or lfdirstate[f] == b'r':
922 elif standin in wctx or lfdirstate[f] == b'r':
923 continue
923 continue
924 else:
924 else:
925 matchfiles.append(f)
925 matchfiles.append(f)
926 m._files = matchfiles
926 m._files = matchfiles
927 m._fileset = set(m._files)
927 m._fileset = set(m._files)
928 origmatchfn = m.matchfn
928 origmatchfn = m.matchfn
929
929
930 def matchfn(f):
930 def matchfn(f):
931 lfile = lfutil.splitstandin(f)
931 lfile = lfutil.splitstandin(f)
932 if lfile is not None:
932 if lfile is not None:
933 return origmatchfn(lfile) and (f in ctx or f in mctx)
933 return origmatchfn(lfile) and (f in ctx or f in mctx)
934 return origmatchfn(f)
934 return origmatchfn(f)
935
935
936 m.matchfn = matchfn
936 m.matchfn = matchfn
937 return m
937 return m
938
938
939 with extensions.wrappedfunction(scmutil, b'match', overridematch):
939 with extensions.wrappedfunction(scmutil, b'match', overridematch):
940 orig(ui, repo, ctx, *pats, **opts)
940 orig(ui, repo, ctx, *pats, **opts)
941
941
942 newstandins = lfutil.getstandinsstate(repo)
942 newstandins = lfutil.getstandinsstate(repo)
943 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
943 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
944 # lfdirstate should be 'normallookup'-ed for updated files,
944 # lfdirstate should be 'normallookup'-ed for updated files,
945 # because reverting doesn't touch dirstate for 'normal' files
945 # because reverting doesn't touch dirstate for 'normal' files
946 # when target revision is explicitly specified: in such case,
946 # when target revision is explicitly specified: in such case,
947 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
947 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
948 # of target (standin) file.
948 # of target (standin) file.
949 lfcommands.updatelfiles(
949 lfcommands.updatelfiles(
950 ui, repo, filelist, printmessage=False, normallookup=True
950 ui, repo, filelist, printmessage=False, normallookup=True
951 )
951 )
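
# Illustrative sketch (not part of this module): the "diff the standin states"
# step above, modeled with plain dicts mapping standin path -> content hash.
# toy_changed_largefiles is a hypothetical stand-in for
# getstandinsstate()/getlfilestoupdate().

def toy_changed_largefiles(oldstate, newstate):
    changed = []
    for standin, newhash in newstate.items():
        if oldstate.get(standin) != newhash:
            changed.append(standin[len('.hglf/'):])
    # standins that disappeared also need their largefile refreshed or removed
    for standin in oldstate:
        if standin not in newstate:
            changed.append(standin[len('.hglf/'):])
    return sorted(changed)

if __name__ == '__main__':
    old = {'.hglf/a.bin': '111', '.hglf/b.bin': '222'}
    new = {'.hglf/a.bin': '333', '.hglf/c.bin': '444'}
    print(toy_changed_largefiles(old, new))  # ['a.bin', 'b.bin', 'c.bin']
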
952
952
953
953
954 # after pulling changesets, we need to take some extra care to get the
954 # after pulling changesets, we need to take some extra care to get the
955 # largefiles for the pulled revisions downloaded from the remote
955 # largefiles for the pulled revisions downloaded from the remote
956 @eh.wrapcommand(
956 @eh.wrapcommand(
957 b'pull',
957 b'pull',
958 opts=[
958 opts=[
959 (
959 (
960 b'',
960 b'',
961 b'all-largefiles',
961 b'all-largefiles',
962 None,
962 None,
963 _(b'download all pulled versions of largefiles (DEPRECATED)'),
963 _(b'download all pulled versions of largefiles (DEPRECATED)'),
964 ),
964 ),
965 (
965 (
966 b'',
966 b'',
967 b'lfrev',
967 b'lfrev',
968 [],
968 [],
969 _(b'download largefiles for these revisions'),
969 _(b'download largefiles for these revisions'),
970 _(b'REV'),
970 _(b'REV'),
971 ),
971 ),
972 ],
972 ],
973 )
973 )
974 def overridepull(orig, ui, repo, source=None, **opts):
974 def overridepull(orig, ui, repo, source=None, **opts):
975 revsprepull = len(repo)
975 revsprepull = len(repo)
976 if not source:
976 if not source:
977 source = b'default'
977 source = b'default'
978 repo.lfpullsource = source
978 repo.lfpullsource = source
979 result = orig(ui, repo, source, **opts)
979 result = orig(ui, repo, source, **opts)
980 revspostpull = len(repo)
980 revspostpull = len(repo)
981 lfrevs = opts.get('lfrev', [])
981 lfrevs = opts.get('lfrev', [])
982 if opts.get('all_largefiles'):
982 if opts.get('all_largefiles'):
983 lfrevs.append(b'pulled()')
983 lfrevs.append(b'pulled()')
984 if lfrevs and revspostpull > revsprepull:
984 if lfrevs and revspostpull > revsprepull:
985 numcached = 0
985 numcached = 0
986 repo.firstpulled = revsprepull # for pulled() revset expression
986 repo.firstpulled = revsprepull # for pulled() revset expression
987 try:
987 try:
988 for rev in scmutil.revrange(repo, lfrevs):
988 for rev in scmutil.revrange(repo, lfrevs):
989 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
989 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
990 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
990 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
991 numcached += len(cached)
991 numcached += len(cached)
992 finally:
992 finally:
993 del repo.firstpulled
993 del repo.firstpulled
994 ui.status(_(b"%d largefiles cached\n") % numcached)
994 ui.status(_(b"%d largefiles cached\n") % numcached)
995 return result
995 return result
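
# Illustrative sketch (not part of this module): how the pulled() window above
# works. Local revision numbers are dense integers, so everything at or past
# the pre-pull length of the repo was just pulled. toy_pulled_revs is a
# hypothetical helper mirroring the firstpulled bookkeeping.

def toy_pulled_revs(revs_before_pull, revs_after_pull):
    # returns the revision numbers added by the pull
    return list(range(revs_before_pull, revs_after_pull))

if __name__ == '__main__':
    # the repo had 10 revisions (0-9); the pull brought it to 13 (0-12)
    print(toy_pulled_revs(10, 13))  # [10, 11, 12]
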
996
996
997
997
998 @eh.wrapcommand(
998 @eh.wrapcommand(
999 b'push',
999 b'push',
1000 opts=[
1000 opts=[
1001 (
1001 (
1002 b'',
1002 b'',
1003 b'lfrev',
1003 b'lfrev',
1004 [],
1004 [],
1005 _(b'upload largefiles for these revisions'),
1005 _(b'upload largefiles for these revisions'),
1006 _(b'REV'),
1006 _(b'REV'),
1007 )
1007 )
1008 ],
1008 ],
1009 )
1009 )
1010 def overridepush(orig, ui, repo, *args, **kwargs):
1010 def overridepush(orig, ui, repo, *args, **kwargs):
1011 """Override push command and store --lfrev parameters in opargs"""
1011 """Override push command and store --lfrev parameters in opargs"""
1012 lfrevs = kwargs.pop('lfrev', None)
1012 lfrevs = kwargs.pop('lfrev', None)
1013 if lfrevs:
1013 if lfrevs:
1014 opargs = kwargs.setdefault('opargs', {})
1014 opargs = kwargs.setdefault('opargs', {})
1015 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1015 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1016 return orig(ui, repo, *args, **kwargs)
1016 return orig(ui, repo, *args, **kwargs)
1017
1017
1018
1018
1019 @eh.wrapfunction(exchange, b'pushoperation')
1019 @eh.wrapfunction(exchange, b'pushoperation')
1020 def exchangepushoperation(orig, *args, **kwargs):
1020 def exchangepushoperation(orig, *args, **kwargs):
1021 """Override pushoperation constructor and store lfrevs parameter"""
1021 """Override pushoperation constructor and store lfrevs parameter"""
1022 lfrevs = kwargs.pop('lfrevs', None)
1022 lfrevs = kwargs.pop('lfrevs', None)
1023 pushop = orig(*args, **kwargs)
1023 pushop = orig(*args, **kwargs)
1024 pushop.lfrevs = lfrevs
1024 pushop.lfrevs = lfrevs
1025 return pushop
1025 return pushop
1026
1026
1027
1027
1028 @eh.revsetpredicate(b'pulled()')
1028 @eh.revsetpredicate(b'pulled()')
1029 def pulledrevsetsymbol(repo, subset, x):
1029 def pulledrevsetsymbol(repo, subset, x):
1030 """Changesets that just has been pulled.
1030 """Changesets that just has been pulled.
1031
1031
1032 Only available with largefiles from pull --lfrev expressions.
1032 Only available with largefiles from pull --lfrev expressions.
1033
1033
1034 .. container:: verbose
1034 .. container:: verbose
1035
1035
1036 Some examples:
1036 Some examples:
1037
1037
1038 - pull largefiles for all new changesets::
1038 - pull largefiles for all new changesets::
1039
1039
1040 hg pull --lfrev "pulled()"
1040 hg pull --lfrev "pulled()"
1041
1041
1042 - pull largefiles for all new branch heads::
1042 - pull largefiles for all new branch heads::
1043
1043
1044 hg pull --lfrev "head(pulled()) and not closed()"
1044 hg pull --lfrev "head(pulled()) and not closed()"
1045
1045
1046 """
1046 """
1047
1047
1048 try:
1048 try:
1049 firstpulled = repo.firstpulled
1049 firstpulled = repo.firstpulled
1050 except AttributeError:
1050 except AttributeError:
1051 raise error.Abort(_(b"pulled() only available in --lfrev"))
1051 raise error.Abort(_(b"pulled() only available in --lfrev"))
1052 return smartset.baseset([r for r in subset if r >= firstpulled])
1052 return smartset.baseset([r for r in subset if r >= firstpulled])
1053
1053
1054
1054
1055 @eh.wrapcommand(
1055 @eh.wrapcommand(
1056 b'clone',
1056 b'clone',
1057 opts=[
1057 opts=[
1058 (
1058 (
1059 b'',
1059 b'',
1060 b'all-largefiles',
1060 b'all-largefiles',
1061 None,
1061 None,
1062 _(b'download all versions of all largefiles'),
1062 _(b'download all versions of all largefiles'),
1063 )
1063 )
1064 ],
1064 ],
1065 )
1065 )
1066 def overrideclone(orig, ui, source, dest=None, **opts):
1066 def overrideclone(orig, ui, source, dest=None, **opts):
1067 d = dest
1067 d = dest
1068 if d is None:
1068 if d is None:
1069 d = hg.defaultdest(source)
1069 d = hg.defaultdest(source)
1070 if opts.get('all_largefiles') and not hg.islocal(d):
1070 if opts.get('all_largefiles') and not hg.islocal(d):
1071 raise error.Abort(
1071 raise error.Abort(
1072 _(b'--all-largefiles is incompatible with non-local destination %s')
1072 _(b'--all-largefiles is incompatible with non-local destination %s')
1073 % d
1073 % d
1074 )
1074 )
1075
1075
1076 return orig(ui, source, dest, **opts)
1076 return orig(ui, source, dest, **opts)
1077
1077
1078
1078
1079 @eh.wrapfunction(hg, b'clone')
1079 @eh.wrapfunction(hg, b'clone')
1080 def hgclone(orig, ui, opts, *args, **kwargs):
1080 def hgclone(orig, ui, opts, *args, **kwargs):
1081 result = orig(ui, opts, *args, **kwargs)
1081 result = orig(ui, opts, *args, **kwargs)
1082
1082
1083 if result is not None:
1083 if result is not None:
1084 sourcerepo, destrepo = result
1084 sourcerepo, destrepo = result
1085 repo = destrepo.local()
1085 repo = destrepo.local()
1086
1086
1087 # When cloning to a remote repo (like through SSH), no repo is available
1087 # When cloning to a remote repo (like through SSH), no repo is available
1088 # from the peer. Therefore the largefiles can't be downloaded and the
1088 # from the peer. Therefore the largefiles can't be downloaded and the
1089 # hgrc can't be updated.
1089 # hgrc can't be updated.
1090 if not repo:
1090 if not repo:
1091 return result
1091 return result
1092
1092
1093 # Caching is implicitly limited to 'rev' option, since the dest repo was
1093 # Caching is implicitly limited to 'rev' option, since the dest repo was
1094 # truncated at that point. The user may expect a download count with
1094 # truncated at that point. The user may expect a download count with
1095 # this option, so attempt the download whether or not this is a largefiles repo.
1095 # this option, so attempt the download whether or not this is a largefiles repo.
1096 if opts.get(b'all_largefiles'):
1096 if opts.get(b'all_largefiles'):
1097 success, missing = lfcommands.downloadlfiles(ui, repo, None)
1097 success, missing = lfcommands.downloadlfiles(ui, repo)
1098
1098
1099 if missing != 0:
1099 if missing != 0:
1100 return None
1100 return None
1101
1101
1102 return result
1102 return result
1103
1103
1104
1104
1105 @eh.wrapcommand(b'rebase', extension=b'rebase')
1105 @eh.wrapcommand(b'rebase', extension=b'rebase')
1106 def overriderebase(orig, ui, repo, **opts):
1106 def overriderebase(orig, ui, repo, **opts):
1107 if not util.safehasattr(repo, b'_largefilesenabled'):
1107 if not util.safehasattr(repo, b'_largefilesenabled'):
1108 return orig(ui, repo, **opts)
1108 return orig(ui, repo, **opts)
1109
1109
1110 resuming = opts.get('continue')
1110 resuming = opts.get('continue')
1111 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1111 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1112 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1112 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1113 try:
1113 try:
1114 return orig(ui, repo, **opts)
1114 return orig(ui, repo, **opts)
1115 finally:
1115 finally:
1116 repo._lfstatuswriters.pop()
1116 repo._lfstatuswriters.pop()
1117 repo._lfcommithooks.pop()
1117 repo._lfcommithooks.pop()
1118
1118
1119
1119
1120 @eh.wrapcommand(b'archive')
1120 @eh.wrapcommand(b'archive')
1121 def overridearchivecmd(orig, ui, repo, dest, **opts):
1121 def overridearchivecmd(orig, ui, repo, dest, **opts):
1122 with lfstatus(repo.unfiltered()):
1122 with lfstatus(repo.unfiltered()):
1123 return orig(ui, repo.unfiltered(), dest, **opts)
1123 return orig(ui, repo.unfiltered(), dest, **opts)
1124
1124
1125
1125
1126 @eh.wrapfunction(webcommands, b'archive')
1126 @eh.wrapfunction(webcommands, b'archive')
1127 def hgwebarchive(orig, web):
1127 def hgwebarchive(orig, web):
1128 with lfstatus(web.repo):
1128 with lfstatus(web.repo):
1129 return orig(web)
1129 return orig(web)
1130
1130
1131
1131
1132 @eh.wrapfunction(archival, b'archive')
1132 @eh.wrapfunction(archival, b'archive')
1133 def overridearchive(
1133 def overridearchive(
1134 orig,
1134 orig,
1135 repo,
1135 repo,
1136 dest,
1136 dest,
1137 node,
1137 node,
1138 kind,
1138 kind,
1139 decode=True,
1139 decode=True,
1140 match=None,
1140 match=None,
1141 prefix=b'',
1141 prefix=b'',
1142 mtime=None,
1142 mtime=None,
1143 subrepos=None,
1143 subrepos=None,
1144 ):
1144 ):
1145 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1145 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1146 # unfiltered repo's attr, so check that as well.
1146 # unfiltered repo's attr, so check that as well.
1147 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1147 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1148 return orig(
1148 return orig(
1149 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1149 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1150 )
1150 )
1151
1151
1152 # No need to lock because we are only reading history and
1152 # No need to lock because we are only reading history and
1153 # largefile caches, neither of which are modified.
1153 # largefile caches, neither of which are modified.
1154 if node is not None:
1154 if node is not None:
1155 lfcommands.cachelfiles(repo.ui, repo, node)
1155 lfcommands.cachelfiles(repo.ui, repo, node)
1156
1156
1157 if kind not in archival.archivers:
1157 if kind not in archival.archivers:
1158 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1158 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1159
1159
1160 ctx = repo[node]
1160 ctx = repo[node]
1161
1161
1162 if kind == b'files':
1162 if kind == b'files':
1163 if prefix:
1163 if prefix:
1164 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1164 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1165 else:
1165 else:
1166 prefix = archival.tidyprefix(dest, kind, prefix)
1166 prefix = archival.tidyprefix(dest, kind, prefix)
1167
1167
1168 def write(name, mode, islink, getdata):
1168 def write(name, mode, islink, getdata):
1169 if match and not match(name):
1169 if match and not match(name):
1170 return
1170 return
1171 data = getdata()
1171 data = getdata()
1172 if decode:
1172 if decode:
1173 data = repo.wwritedata(name, data)
1173 data = repo.wwritedata(name, data)
1174 archiver.addfile(prefix + name, mode, islink, data)
1174 archiver.addfile(prefix + name, mode, islink, data)
1175
1175
1176 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1176 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1177
1177
1178 if repo.ui.configbool(b"ui", b"archivemeta"):
1178 if repo.ui.configbool(b"ui", b"archivemeta"):
1179 write(
1179 write(
1180 b'.hg_archival.txt',
1180 b'.hg_archival.txt',
1181 0o644,
1181 0o644,
1182 False,
1182 False,
1183 lambda: archival.buildmetadata(ctx),
1183 lambda: archival.buildmetadata(ctx),
1184 )
1184 )
1185
1185
1186 for f in ctx:
1186 for f in ctx:
1187 ff = ctx.flags(f)
1187 ff = ctx.flags(f)
1188 getdata = ctx[f].data
1188 getdata = ctx[f].data
1189 lfile = lfutil.splitstandin(f)
1189 lfile = lfutil.splitstandin(f)
1190 if lfile is not None:
1190 if lfile is not None:
1191 if node is not None:
1191 if node is not None:
1192 path = lfutil.findfile(repo, getdata().strip())
1192 path = lfutil.findfile(repo, getdata().strip())
1193
1193
1194 if path is None:
1194 if path is None:
1195 raise error.Abort(
1195 raise error.Abort(
1196 _(
1196 _(
1197 b'largefile %s not found in repo store or system cache'
1197 b'largefile %s not found in repo store or system cache'
1198 )
1198 )
1199 % lfile
1199 % lfile
1200 )
1200 )
1201 else:
1201 else:
1202 path = lfile
1202 path = lfile
1203
1203
1204 f = lfile
1204 f = lfile
1205
1205
1206 getdata = lambda: util.readfile(path)
1206 getdata = lambda: util.readfile(path)
1207 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1207 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1208
1208
1209 if subrepos:
1209 if subrepos:
1210 for subpath in sorted(ctx.substate):
1210 for subpath in sorted(ctx.substate):
1211 sub = ctx.workingsub(subpath)
1211 sub = ctx.workingsub(subpath)
1212 submatch = matchmod.subdirmatcher(subpath, match)
1212 submatch = matchmod.subdirmatcher(subpath, match)
1213 subprefix = prefix + subpath + b'/'
1213 subprefix = prefix + subpath + b'/'
1214
1214
1215 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1215 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1216 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1216 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1217 # allow only hgsubrepos to set this, instead of the current scheme
1217 # allow only hgsubrepos to set this, instead of the current scheme
1218 # where the parent sets this for the child.
1218 # where the parent sets this for the child.
1219 with (
1219 with (
1220 util.safehasattr(sub, '_repo')
1220 util.safehasattr(sub, '_repo')
1221 and lfstatus(sub._repo)
1221 and lfstatus(sub._repo)
1222 or util.nullcontextmanager()
1222 or util.nullcontextmanager()
1223 ):
1223 ):
1224 sub.archive(archiver, subprefix, submatch)
1224 sub.archive(archiver, subprefix, submatch)
1225
1225
1226 archiver.done()
1226 archiver.done()
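
# Illustrative sketch (not part of this module): the substitution the archive
# wrapper above performs for each standin, reduced to plain data. A standin's
# content is the largefile's hash; the archive must contain the largefile's
# bytes under the original name instead. toy_archive_entry and the in-memory
# 'store' dict are hypothetical.

def toy_archive_entry(name, content, largefile_store):
    if name.startswith('.hglf/'):
        lfile = name[len('.hglf/'):]
        lfhash = content.strip()
        if lfhash not in largefile_store:
            raise LookupError('largefile %s not found in store' % lfile)
        return lfile, largefile_store[lfhash]
    return name, content

if __name__ == '__main__':
    store = {'d0be2dc...': 'big binary payload'}
    print(toy_archive_entry('.hglf/big.dat', 'd0be2dc...\n', store))
    print(toy_archive_entry('README', 'plain text contents', store))
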
1227
1227
1228
1228
@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function. That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)

# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        s = repo.status()
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_(b'uncommitted changes'))

@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)

@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == b'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot

def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on the 'other' repository are ignored.

    'addfunc' is invoked with each unique pair of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]:  # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)

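`_getoutgoings()` only reports each missing (filename, hash) pair through the callback; how the pairs are aggregated is entirely up to the caller, as the outgoing and summary hooks below show. A minimal caller might look like this (sketch only; `repo`, `other` and `missing` are assumed to come from an outgoing computation):

toupload = {}  # filename -> hashes not yet present on the other side

def collect(fn, lfhash):
    # accumulate every missing hash per filename
    toupload.setdefault(fn, set()).add(lfhash)

# _getoutgoings(repo, other, missing, collect)
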
def outgoinghook(ui, repo, other, opts, missing):
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}

            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b' %s\n' % lfhash)

        else:
            toupload = set()

            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)

            def showhashes(fn):
                pass

        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_(b'largefiles: no files to upload\n'))
        else:
            ui.status(
                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
            )
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + b'\n')
                showhashes(file)
            ui.status(b'\n')

@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    # Nothing to do here other than add the extra help option; the hook above
    # processes it.
    return orig(*args, **kwargs)

def summaryremotehook(ui, repo, opts, changes):
    largeopt = opts.get(b'large', False)
    if changes is None:
        if largeopt:
            return (False, True)  # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(
                _(b'largefiles: %d entities for %d files to upload\n')
                % (len(lfhashes), len(toupload))
            )

@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)

@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(
        matchmod.always(),
        subrepos=[],
        ignored=False,
        clean=False,
        unknown=False,
    )

    # Call into the normal remove code, but we want the removal of the standin
    # to be handled by the original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be.
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, uipathfn, opts)

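The deleted-files handling in `scmutiladdremove()` keeps the original matcher object, so `m.exact()` still reflects the user-supplied patterns, and merely ANDs its match function with a membership test. The same trick in isolation (the helper name is hypothetical):

def narrow_matchfn(matcher, allowed):
    # Restrict `matcher` to paths in `allowed` without touching its internal
    # file lists; `matcher` is any object with a callable `matchfn` attribute.
    inner = matcher.matchfn
    matcher.matchfn = lambda f: f in allowed and inner(f)
    return matcher
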
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge', extension=b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only look up attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a workaround we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
        ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus

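The XXX comment in `overridepurge()` is easier to see with a tiny model of the repoview lookup rules (the class names below are hypothetical, not Mercurial APIs): attribute writes are forwarded to the unfiltered repo, while reads still find the method defined on the view class, so a monkey patch assigned through the view never takes effect on it.

class _Unfiltered(object):
    def status(self):
        return b'unfiltered status'

class _View(object):
    # Minimal stand-in for a repoview: reads fall back to the unfiltered
    # repo, writes are forwarded to it.
    def __init__(self, unfi):
        object.__setattr__(self, '_unfi', unfi)

    def status(self):  # defined on the view class itself
        return b'view status'

    def __getattr__(self, name):  # only reached when normal lookup fails
        return getattr(self._unfi, name)

    def __setattr__(self, name, value):  # writes land on the unfiltered repo
        setattr(self._unfi, name, value)

view = _View(_Unfiltered())
view.status = lambda: b'patched'
assert view.status() == b'view status'  # the class method still wins
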
@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    with repo.wlock():
        before = repo.dirstate.parents()
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == b'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result

@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result

@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err

@eh.wrapfunction(merge, b'update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=True,
            unknown=False,
        )
        oldclean = set(s.clean)
        pctx = repo[b'.']
        dctx = repo[node]
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(
                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
            )
            if standin in pctx and lfhash == lfutil.readasstandin(
                pctx[standin]
            ):
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.normallookup(lfile)
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)
        # Make sure the merge runs on disk, not in-memory. largefiles is not a
        # good candidate for in-memory merge (large files, custom dirstate,
        # matcher usage).
        kwargs['wc'] = repo[None]
        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.normal(lfile)
        lfdirstate.write()

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

        return result

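The standin updates in `mergeupdate()` boil down to hashing the on-disk largefile and recording the hex digest (plus the exec bit) in the small standin file that Mercurial actually tracks. A rough standalone equivalent of the hashing step (the function name and block size are illustrative):

import hashlib

def hash_largefile(path, blocksize=128 * 1024):
    # stream the file in chunks so arbitrarily large files fit in memory
    h = hashlib.sha1()
    with open(path, 'rb') as fp:
        for chunk in iter(lambda: fp.read(blocksize), b''):
            h.update(chunk)
    return h.hexdigest()
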
@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    result = orig(repo, files, *args, **kwargs)

    filelist = []
    for f in files:
        lf = lfutil.splitstandin(f)
        if lf is not None:
            filelist.append(lf)
    if filelist:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=filelist,
            printmessage=False,
            normallookup=True,
        )

    return result

@eh.wrapfunction(upgrade, b'preservedrequirements')
@eh.wrapfunction(upgrade, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if b'largefiles' in repo.requirements:
        reqs.add(b'largefiles')
    return reqs

_lfscheme = b'largefile://'


@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None):
    if url_.startswith(_lfscheme):
        if data:
            msg = b"cannot use data on a 'largefile://' url"
            raise error.ProgrammingError(msg)
        lfid = url_[len(_lfscheme) :]
        return storefactory.getlfile(ui, lfid)
    else:
        return orig(ui, url_, data=data)
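
For example, a hypothetical b'largefile://<40-hex-id>' URL is split into its id and then fetched from the configured store:

url_ = b'largefile://' + b'd' * 40  # hypothetical largefile id
assert url_.startswith(_lfscheme)
lfid = url_[len(_lfscheme) :]  # -> b'ddd...d', handed to storefactory.getlfile()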