##// END OF EJS Templates
largefiles: replace use of walkchangerevs() with simple revset query...
Yuya Nishihara -
r46026:ac7b9ed0 default
parent child Browse files
Show More
@@ -1,668 +1,664 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 from mercurial import (
18 from mercurial import (
19 cmdutil,
19 cmdutil,
20 context,
20 context,
21 error,
21 error,
22 exthelper,
22 exthelper,
23 hg,
23 hg,
24 lock,
24 lock,
25 match as matchmod,
25 match as matchmod,
26 node,
26 node,
27 pycompat,
27 pycompat,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 )
30 )
31 from mercurial.utils import hashutil
31 from mercurial.utils import hashutil
32
32
33 from ..convert import (
33 from ..convert import (
34 convcmd,
34 convcmd,
35 filemap,
35 filemap,
36 )
36 )
37
37
38 from . import lfutil, storefactory
38 from . import lfutil, storefactory
39
39
# Shortcut for releasing several locks at once (used in lfconvert's finally).
release = lock.release

# -- Commands ----------------------------------------------------------

# Extension helper used to register the lfconvert command below.
eh = exthelper.exthelper()
@eh.command(
    b'lfconvert',
    [
        (
            b's',
            b'size',
            b'',
            _(b'minimum size (MB) for files to be converted as largefiles'),
            b'SIZE',
        ),
        (
            b'',
            b'to-normal',
            False,
            _(b'convert from a largefiles repo to a normal repo'),
        ),
    ],
    _(b'hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True,
)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    opts = pycompat.byteskwargs(opts)
    # Direction of the conversion; the size threshold only matters when
    # converting *to* largefiles.
    tolfile = not opts[b'to_normal']
    if tolfile:
        size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_(b'initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Walk every changeset in the source from roots to heads.  A
        # generator (rather than a list) keeps memory usage down on large
        # repositories.
        ctxs = (
            rsrc[ctx]
            for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
        )
        # Maps source nodes to the corresponding destination nodes.
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted
            # to.  The source needs no lock: we only read immutable history.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, b'patterns')
            matcher = matchmod.match(rsrc.root, b'', list(pats)) if pats else None

            lfiletohash = {}
            with ui.makeprogress(
                _(b'converting revisions'),
                unit=_(b'revisions'),
                total=rsrc[b'tip'].rev(),
            ) as progress:
                for ctx in ctxs:
                    progress.update(ctx.rev())
                    _lfconvert_addchangeset(
                        rsrc,
                        rdst,
                        ctx,
                        revmap,
                        lfiles,
                        normalfiles,
                        matcher,
                        size,
                        lfiletohash,
                    )

            # Drop the working-copy largefile store and the working copies
            # of the converted largefiles; only the standins belong in DEST.
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add(b'largefiles')
                scmutil.writereporequirements(rdst)
        else:

            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    # Map the largefile store directory back onto the root.
                    self.filemapper.rename[lfutil.shortname] = b'.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (
                        not realname.startswith(lfutil.shortnameslash)
                        or f[0] is None
                    ):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(
                            _(b"missing largefile for '%s' in %s")
                            % (realname, realrev)
                        )
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(
                        ui, src, dest, revmapfile, opts
                    )

            # Every largefile must be available locally before we can inline
            # its content into the converted history.
            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_(b"all largefiles must be present locally"))

            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(
                    ui, src, dest, source_type=b'hg', dest_type=b'hg'
                )
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            # The dirstate was only scratch space for the conversion.
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
224
224
225
225
def _lfconvert_addchangeset(
    rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
):
    """Convert one source changeset ``ctx`` and commit it into ``rdst``.

    ``lfiles`` and ``normalfiles`` accumulate the largefile/normal
    classification of every path seen so far (a path keeps its first
    classification); ``lfiletohash`` caches the content hash last written
    for each largefile; ``revmap`` maps source nodes to destination nodes
    and gains an entry for ``ctx`` on success.
    """
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.copysource()
                if renamed is None:
                    # the code below assumes renamed to be a boolean or a list
                    # and won't quite work with the value None
                    renamed = False
                renamedlfile = renamed and renamed in lfiles
                islfile |= renamedlfile
                if b'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _(b'renamed/copied largefile %s becomes symlink')
                            % f
                        )
                    # symlinks are never tracked as largefiles
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if b'l' in fctx.flags():
                    renamed = fctx.copysource()
                    if renamed and renamed in lfiles:
                        raise error.Abort(
                            _(b'largefile %s becomes symlink') % f
                        )

                # largefile was modified, update standins
                m = hashutil.sha1(b'')
                m.update(ctx[f].data())
                hash = node.hex(m.digest())
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = b'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash, executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # Supply file content for the destination commit: standins get the
        # cached largefile hash, everything else is copied through.
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.copysource()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed)

            return context.memfilectx(
                repo,
                memctx,
                f,
                lfiletohash[srcfname] + b'\n',
                b'l' in fctx.flags(),
                b'x' in fctx.flags(),
                renamed,
            )
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
317
317
318
318
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit a converted changeset into ``rdst``, mirroring ``ctx``'s
    metadata, and record the source->dest node mapping in ``revmap``."""
    mctx = context.memctx(
        rdst,
        parents,
        ctx.description(),
        dstfiles,
        getfilectx,
        ctx.user(),
        ctx.date(),
        ctx.extra(),
    )
    ret = rdst.commitctx(mctx)
    # make sure the newly committed largefiles land in the local store
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
334
334
335
335
# Generate list of changed files
def _getchangedfiles(ctx, parents):
    """Return the set of files touched by ``ctx``.

    For non-root changesets the manifest diff against each parent is
    folded in as well, so merge-only changes are not missed.
    """
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        for pctx in ctx.parents():
            files.update(pctx.manifest().diff(mc))
    return files
345
345
346
346
# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    """Map ``ctx``'s parent nodes through ``revmap``, padded with nullid
    so the result always has exactly two entries."""
    parents = [revmap[p.node()] for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(node.nullid)
    return parents
355
355
356
356
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    """Return a memfilectx for normal file ``f`` in ``ctx``, or None if the
    file was removed/renamed.  ``.hgtags`` content is remapped through
    ``revmap`` so tags point at the converted nodes."""
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.copysource()

    data = fctx.data()
    if f == b'.hgtags':
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(
        repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
    )
371
371
372
372
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    """Rewrite ``.hgtags`` content so tagged node ids point at the
    converted revisions; malformed or unmapped lines are warned about
    and dropped."""
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(b' ', 1)
        except ValueError:
            ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            newid = node.bin(id)
        except TypeError:
            ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            newdata.append(b'%s %s\n' % (node.hex(revmap[newid]), name))
        except KeyError:
            ui.warn(_(b'no mapping for id %s\n') % id)
            continue
    return b''.join(newdata)
393
393
394
394
def _islfile(file, ctx, matcher, size):
    '''Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size.'''
    # never store special .hg* files as largefiles
    if file in (b'.hgtags', b'.hgignore', b'.hgsigs'):
        return False
    if matcher and matcher(file):
        return True
    # fall back to the size threshold (MB); a file missing from the
    # manifest is not a largefile
    try:
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        return False
407
407
408
408
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
    # Ask the store which hashes it already has, then upload only the rest.
    retval = store.exists(files)
    files = [h for h in files if not retval[h]]
    ui.debug(b"%d largefiles need to be uploaded\n" % len(files))

    with ui.makeprogress(
        _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
    ) as progress:
        for hash in files:
            progress.update(at)
            source = lfutil.findfile(rsrc, hash)
            if not source:
                raise error.Abort(
                    _(
                        b'largefile %s missing from store'
                        b' (needs to be uploaded)'
                    )
                    % hash
                )
            # XXX check for errors here
            store.put(source, hash)
            at += 1
440
440
441
441
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    revs = repo.revs(b'all()') if all else [b'.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
455
455
456
456
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)

    ctx = repo[node]
    # Collect (lfile, hash) pairs whose content is not yet cached locally.
    toget = []
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except IOError as err:
            if err.errno == errno.ENOENT:
                continue  # node must be None and standin wasn't found in wctx
            raise
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if not toget:
        return ([], [])

    store = storefactory.openstore(repo)
    return store.get(toget)
486
486
487
487
def downloadlfiles(ui, repo):
    """Download to the local cache every largefile referenced anywhere in
    history.

    Uses a simple revset query (``file('path:.hglf')``) over the standin
    directory instead of the deprecated ``cmdutil.walkchangerevs()`` API;
    revisions are visited newest-first so recent largefiles are fetched
    early.  Returns ``(totalsuccess, totalmissing)`` counts.
    """
    tonode = repo.changelog.node
    totalsuccess = 0
    totalmissing = 0
    for rev in repo.revs(b'reverse(file(%s))', b'path:' + lfutil.shortname):
        success, missing = cachelfiles(ui, repo, tonode(rev))
        totalsuccess += len(success)
        totalmissing += len(missing)
    ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
504
500
505
501
506 def updatelfiles(
502 def updatelfiles(
507 ui, repo, filelist=None, printmessage=None, normallookup=False
503 ui, repo, filelist=None, printmessage=None, normallookup=False
508 ):
504 ):
509 '''Update largefiles according to standins in the working directory
505 '''Update largefiles according to standins in the working directory
510
506
511 If ``printmessage`` is other than ``None``, it means "print (or
507 If ``printmessage`` is other than ``None``, it means "print (or
512 ignore, for false) message forcibly".
508 ignore, for false) message forcibly".
513 '''
509 '''
514 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
510 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
515 with repo.wlock():
511 with repo.wlock():
516 lfdirstate = lfutil.openlfdirstate(ui, repo)
512 lfdirstate = lfutil.openlfdirstate(ui, repo)
517 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
513 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
518
514
519 if filelist is not None:
515 if filelist is not None:
520 filelist = set(filelist)
516 filelist = set(filelist)
521 lfiles = [f for f in lfiles if f in filelist]
517 lfiles = [f for f in lfiles if f in filelist]
522
518
523 update = {}
519 update = {}
524 dropped = set()
520 dropped = set()
525 updated, removed = 0, 0
521 updated, removed = 0, 0
526 wvfs = repo.wvfs
522 wvfs = repo.wvfs
527 wctx = repo[None]
523 wctx = repo[None]
528 for lfile in lfiles:
524 for lfile in lfiles:
529 lfileorig = os.path.relpath(
525 lfileorig = os.path.relpath(
530 scmutil.backuppath(ui, repo, lfile), start=repo.root
526 scmutil.backuppath(ui, repo, lfile), start=repo.root
531 )
527 )
532 standin = lfutil.standin(lfile)
528 standin = lfutil.standin(lfile)
533 standinorig = os.path.relpath(
529 standinorig = os.path.relpath(
534 scmutil.backuppath(ui, repo, standin), start=repo.root
530 scmutil.backuppath(ui, repo, standin), start=repo.root
535 )
531 )
536 if wvfs.exists(standin):
532 if wvfs.exists(standin):
537 if wvfs.exists(standinorig) and wvfs.exists(lfile):
533 if wvfs.exists(standinorig) and wvfs.exists(lfile):
538 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
534 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
539 wvfs.unlinkpath(standinorig)
535 wvfs.unlinkpath(standinorig)
540 expecthash = lfutil.readasstandin(wctx[standin])
536 expecthash = lfutil.readasstandin(wctx[standin])
541 if expecthash != b'':
537 if expecthash != b'':
542 if lfile not in wctx: # not switched to normal file
538 if lfile not in wctx: # not switched to normal file
543 if repo.dirstate[standin] != b'?':
539 if repo.dirstate[standin] != b'?':
544 wvfs.unlinkpath(lfile, ignoremissing=True)
540 wvfs.unlinkpath(lfile, ignoremissing=True)
545 else:
541 else:
546 dropped.add(lfile)
542 dropped.add(lfile)
547
543
548 # use normallookup() to allocate an entry in largefiles
544 # use normallookup() to allocate an entry in largefiles
549 # dirstate to prevent lfilesrepo.status() from reporting
545 # dirstate to prevent lfilesrepo.status() from reporting
550 # missing files as removed.
546 # missing files as removed.
551 lfdirstate.normallookup(lfile)
547 lfdirstate.normallookup(lfile)
552 update[lfile] = expecthash
548 update[lfile] = expecthash
553 else:
549 else:
554 # Remove lfiles for which the standin is deleted, unless the
550 # Remove lfiles for which the standin is deleted, unless the
555 # lfile is added to the repository again. This happens when a
551 # lfile is added to the repository again. This happens when a
556 # largefile is converted back to a normal file: the standin
552 # largefile is converted back to a normal file: the standin
557 # disappears, but a new (normal) file appears as the lfile.
553 # disappears, but a new (normal) file appears as the lfile.
558 if (
554 if (
559 wvfs.exists(lfile)
555 wvfs.exists(lfile)
560 and repo.dirstate.normalize(lfile) not in wctx
556 and repo.dirstate.normalize(lfile) not in wctx
561 ):
557 ):
562 wvfs.unlinkpath(lfile)
558 wvfs.unlinkpath(lfile)
563 removed += 1
559 removed += 1
564
560
565 # largefile processing might be slow and be interrupted - be prepared
561 # largefile processing might be slow and be interrupted - be prepared
566 lfdirstate.write()
562 lfdirstate.write()
567
563
568 if lfiles:
564 if lfiles:
569 lfiles = [f for f in lfiles if f not in dropped]
565 lfiles = [f for f in lfiles if f not in dropped]
570
566
571 for f in dropped:
567 for f in dropped:
572 repo.wvfs.unlinkpath(lfutil.standin(f))
568 repo.wvfs.unlinkpath(lfutil.standin(f))
573
569
574 # This needs to happen for dropped files, otherwise they stay in
570 # This needs to happen for dropped files, otherwise they stay in
575 # the M state.
571 # the M state.
576 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
572 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
577
573
578 statuswriter(_(b'getting changed largefiles\n'))
574 statuswriter(_(b'getting changed largefiles\n'))
579 cachelfiles(ui, repo, None, lfiles)
575 cachelfiles(ui, repo, None, lfiles)
580
576
581 for lfile in lfiles:
577 for lfile in lfiles:
582 update1 = 0
578 update1 = 0
583
579
584 expecthash = update.get(lfile)
580 expecthash = update.get(lfile)
585 if expecthash:
581 if expecthash:
586 if not lfutil.copyfromcache(repo, expecthash, lfile):
582 if not lfutil.copyfromcache(repo, expecthash, lfile):
587 # failed ... but already removed and set to normallookup
583 # failed ... but already removed and set to normallookup
588 continue
584 continue
589 # Synchronize largefile dirstate to the last modified
585 # Synchronize largefile dirstate to the last modified
590 # time of the file
586 # time of the file
591 lfdirstate.normal(lfile)
587 lfdirstate.normal(lfile)
592 update1 = 1
588 update1 = 1
593
589
594 # copy the exec mode of largefile standin from the repository's
590 # copy the exec mode of largefile standin from the repository's
595 # dirstate to its state in the lfdirstate.
591 # dirstate to its state in the lfdirstate.
596 standin = lfutil.standin(lfile)
592 standin = lfutil.standin(lfile)
597 if wvfs.exists(standin):
593 if wvfs.exists(standin):
598 # exec is decided by the users permissions using mask 0o100
594 # exec is decided by the users permissions using mask 0o100
599 standinexec = wvfs.stat(standin).st_mode & 0o100
595 standinexec = wvfs.stat(standin).st_mode & 0o100
600 st = wvfs.stat(lfile)
596 st = wvfs.stat(lfile)
601 mode = st.st_mode
597 mode = st.st_mode
602 if standinexec != mode & 0o100:
598 if standinexec != mode & 0o100:
603 # first remove all X bits, then shift all R bits to X
599 # first remove all X bits, then shift all R bits to X
604 mode &= ~0o111
600 mode &= ~0o111
605 if standinexec:
601 if standinexec:
606 mode |= (mode >> 2) & 0o111 & ~util.umask
602 mode |= (mode >> 2) & 0o111 & ~util.umask
607 wvfs.chmod(lfile, mode)
603 wvfs.chmod(lfile, mode)
608 update1 = 1
604 update1 = 1
609
605
610 updated += update1
606 updated += update1
611
607
612 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
608 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
613
609
614 lfdirstate.write()
610 lfdirstate.write()
615 if lfiles:
611 if lfiles:
616 statuswriter(
612 statuswriter(
617 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
613 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
618 )
614 )
619
615
620
616
@eh.command(
    b'lfpull',
    [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
    + cmdutil.remoteopts,
    _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
)
def lfpull(ui, repo, source=b"default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Remember where to pull from; the store machinery reads this attribute.
    repo.lfpullsource = source

    revspec = opts.get('rev', [])
    if not revspec:
        raise error.Abort(_(b'no revisions specified'))

    # Resolve the user-supplied revset(s) and cache the largefiles
    # referenced by each resulting revision, counting how many we fetched.
    total = 0
    for rev in scmutil.revrange(repo, revspec):
        ui.note(_(b'pulling largefiles for revision %d\n') % rev)
        cached, missing = cachelfiles(ui, repo, rev)
        total += len(cached)
    ui.status(_(b"%d largefiles cached\n") % total)
661
657
662
658
@eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    """put a largefile into the store and print its hash

    Hashes the file at ``filepath``, uploads it to the repository's
    configured largefile store under that hash, and writes the hash to
    stdout. Returns 0 on success.
    """
    # Use a name other than ``hash`` so the builtin is not shadowed.
    lfhash = lfutil.hashfile(filepath)
    storefactory.openstore(repo).put(filepath, lfhash)
    ui.write(b'%s\n' % lfhash)
    return 0
General Comments 0
You need to be logged in to leave comments. Login now