##// END OF EJS Templates
lfconvert: use a `changing_parents` context to clear the dirstate...
marmoute -
r51005:a8602987 default
parent child Browse files
Show More
@@ -1,674 +1,675 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import binascii
11 import binascii
12 import os
12 import os
13 import shutil
13 import shutil
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16 from mercurial.node import (
16 from mercurial.node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 )
19 )
20
20
21 from mercurial import (
21 from mercurial import (
22 cmdutil,
22 cmdutil,
23 context,
23 context,
24 error,
24 error,
25 exthelper,
25 exthelper,
26 hg,
26 hg,
27 lock,
27 lock,
28 logcmdutil,
28 logcmdutil,
29 match as matchmod,
29 match as matchmod,
30 pycompat,
30 pycompat,
31 scmutil,
31 scmutil,
32 util,
32 util,
33 )
33 )
34 from mercurial.utils import hashutil
34 from mercurial.utils import hashutil
35
35
36 from ..convert import (
36 from ..convert import (
37 convcmd,
37 convcmd,
38 filemap,
38 filemap,
39 )
39 )
40
40
41 from . import lfutil, storefactory
41 from . import lfutil, storefactory
42
42
# Convenience alias for releasing a sequence of locks in order.
release = lock.release

# -- Commands ----------------------------------------------------------

# Extension helper used to register the commands defined below.
eh = exthelper.exthelper()
@eh.command(
    b'lfconvert',
    [
        (
            b's',
            b'size',
            b'',
            _(b'minimum size (MB) for files to be converted as largefiles'),
            b'SIZE',
        ),
        (
            b'',
            b'to-normal',
            False,
            _(b'convert from a largefiles repo to a normal repo'),
        ),
    ],
    _(b'hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True,
)
def lfconvert(ui, src, dest, *pats, **opts):
    """convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all."""

    opts = pycompat.byteskwargs(opts)
    # By default we convert *to* largefiles; --to-normal inverts that.
    tolfile = not opts[b'to_normal']
    if tolfile:
        # Size threshold only matters in the to-largefiles direction.
        size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_(b'initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (
            rsrc[ctx]
            for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
        )
        revmap = {rsrc.nullid: rdst.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted
            # to.  Don't need to lock src because we are just reading from
            # its history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, b'patterns')
            matcher = (
                matchmod.match(rsrc.root, b'', list(pats)) if pats else None
            )

            lfiletohash = {}
            with ui.makeprogress(
                _(b'converting revisions'),
                unit=_(b'revisions'),
                total=rsrc[b'tip'].rev(),
            ) as progress:
                for ctx in ctxs:
                    progress.update(ctx.rev())
                    _lfconvert_addchangeset(
                        rsrc,
                        rdst,
                        ctx,
                        revmap,
                        lfiles,
                        normalfiles,
                        matcher,
                        size,
                        lfiletohash,
                    )

            # Drop the standin directory and any working-copy largefiles
            # left behind by the conversion.
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add(b'largefiles')
                scmutil.writereporequirements(rdst)
        else:

            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = b'.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (
                        not realname.startswith(lfutil.shortnameslash)
                        or f[0] is None
                    ):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(
                            _(b"missing largefile for '%s' in %s")
                            % (realname, realrev)
                        )
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(
                        ui, src, dest, revmapfile, opts
                    )

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_(b"all largefiles must be present locally"))

            # Temporarily swap in our largefile-aware converter class.
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(
                    ui, src, dest, source_type=b'hg', dest_type=b'hg'
                )
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            # XXX is this the right context semantically ?
            with rdst.dirstate.changing_parents(rdst):
                rdst.dirstate.clear()
        release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
228
229
229
230
def _lfconvert_addchangeset(
    rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
):
    """Replay one source changeset ``ctx`` into ``rdst``, converting
    qualifying files to largefile standins.

    ``lfiles``/``normalfiles`` accumulate the classification of every file
    seen so far; ``lfiletohash`` caches the last written standin hash per
    largefile; ``revmap`` is updated with the new destination node.
    """
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.copysource()
                if renamed is None:
                    # the code below assumes renamed to be a boolean or a
                    # list and won't quite work with the value None
                    renamed = False
                renamedlfile = renamed and renamed in lfiles
                islfile |= renamedlfile
                if b'l' in fctx.flags():
                    # Symlinks can never be largefiles.
                    if renamedlfile:
                        raise error.Abort(
                            _(b'renamed/copied largefile %s becomes symlink')
                            % f
                        )
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if b'l' in fctx.flags():
                    renamed = fctx.copysource()
                    if renamed and renamed in lfiles:
                        raise error.Abort(
                            _(b'largefile %s becomes symlink') % f
                        )

                # largefile was modified, update standins
                hasher = hashutil.sha1(b'')
                hasher.update(ctx[f].data())
                hash = hex(hasher.digest())
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = b'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash, executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # Callback used by memctx to produce file contents on demand.
        srcfname = lfutil.splitstandin(f)
        if srcfname is None:
            return _getnormalcontext(repo, ctx, f, revmap)

        # if the file isn't in the manifest then it was removed
        # or renamed, return None to indicate this
        try:
            fctx = ctx.filectx(srcfname)
        except error.LookupError:
            return None
        renamed = fctx.copysource()
        if renamed:
            # standin is always a largefile because largefile-ness
            # doesn't change after rename or copy
            renamed = lfutil.standin(renamed)

        return context.memfilectx(
            repo,
            memctx,
            f,
            lfiletohash[srcfname] + b'\n',
            b'l' in fctx.flags(),
            b'x' in fctx.flags(),
            renamed,
        )

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
321
322
322
323
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit a memctx mirroring ``ctx`` into ``rdst`` and record the
    resulting node in ``revmap``."""
    mctx = context.memctx(
        rdst,
        parents,
        ctx.description(),
        dstfiles,
        getfilectx,
        ctx.user(),
        ctx.date(),
        ctx.extra(),
    )
    ret = rdst.commitctx(mctx)
    # Make sure any largefiles written for this commit land in the store
    # before moving the working parents forward.
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
338
339
339
340
340 # Generate list of changed files
341 # Generate list of changed files
341 def _getchangedfiles(ctx, parents):
342 def _getchangedfiles(ctx, parents):
342 files = set(ctx.files())
343 files = set(ctx.files())
343 if ctx.repo().nullid not in parents:
344 if ctx.repo().nullid not in parents:
344 mc = ctx.manifest()
345 mc = ctx.manifest()
345 for pctx in ctx.parents():
346 for pctx in ctx.parents():
346 for fn in pctx.manifest().diff(mc):
347 for fn in pctx.manifest().diff(mc):
347 files.add(fn)
348 files.add(fn)
348 return files
349 return files
349
350
350
351
351 # Convert src parents to dst parents
352 # Convert src parents to dst parents
352 def _convertparents(ctx, revmap):
353 def _convertparents(ctx, revmap):
353 parents = []
354 parents = []
354 for p in ctx.parents():
355 for p in ctx.parents():
355 parents.append(revmap[p.node()])
356 parents.append(revmap[p.node()])
356 while len(parents) < 2:
357 while len(parents) < 2:
357 parents.append(ctx.repo().nullid)
358 parents.append(ctx.repo().nullid)
358 return parents
359 return parents
359
360
360
361
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    """Build a memfilectx for non-largefile ``f``, or None if it was
    removed/renamed (lookup fails in ``ctx``)."""
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.copysource()

    data = fctx.data()
    if f == b'.hgtags':
        # Tag contents embed node ids, which must be rewritten to point
        # at the converted destination changesets.
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(
        repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
    )
375
376
376
377
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    """Rewrite .hgtags content, translating each node id via ``revmap``.

    Malformed lines, undecodable ids and ids with no mapping are skipped
    with a warning rather than aborting the conversion.
    """
    out = []
    for line in data.splitlines():
        try:
            id, name = line.split(b' ', 1)
        except ValueError:
            ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            newid = bin(id)
        except binascii.Error:
            ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            out.append(b'%s %s\n' % (hex(revmap[newid]), name))
        except KeyError:
            ui.warn(_(b'no mapping for id %s\n') % id)
            continue
    return b''.join(out)
397
398
398
399
399 def _islfile(file, ctx, matcher, size):
400 def _islfile(file, ctx, matcher, size):
400 """Return true if file should be considered a largefile, i.e.
401 """Return true if file should be considered a largefile, i.e.
401 matcher matches it or it is larger than size."""
402 matcher matches it or it is larger than size."""
402 # never store special .hg* files as largefiles
403 # never store special .hg* files as largefiles
403 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
404 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
404 return False
405 return False
405 if matcher and matcher(file):
406 if matcher and matcher(file):
406 return True
407 return True
407 try:
408 try:
408 return ctx.filectx(file).size() >= size * 1024 * 1024
409 return ctx.filectx(file).size() >= size * 1024 * 1024
409 except error.LookupError:
410 except error.LookupError:
410 return False
411 return False
411
412
412
413
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
    # Ask the store which hashes it already has and only upload the rest.
    retval = store.exists(files)
    files = [h for h in files if not retval[h]]
    ui.debug(b"%d largefiles need to be uploaded\n" % len(files))

    with ui.makeprogress(
        _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
    ) as progress:
        for at, hash in enumerate(files):
            progress.update(at)
            source = lfutil.findfile(rsrc, hash)
            if not source:
                raise error.Abort(
                    _(
                        b'largefile %s missing from store'
                        b' (needs to be uploaded)'
                    )
                    % hash
                )
            # XXX check for errors here
            store.put(source, hash)
444
445
445
446
def verifylfiles(ui, repo, all=False, contents=False):
    """Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository."""
    revs = repo.revs(b'all()') if all else [b'.']
    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
459
460
460
461
def cachelfiles(ui, repo, node, filelist=None):
    """cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found."""
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)

    ctx = repo[node]
    toget = []
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except FileNotFoundError:
            continue  # node must be None and standin wasn't found in wctx
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if not toget:
        # Everything needed is already cached locally.
        return ([], [])
    store = storefactory.openstore(repo)
    return store.get(toget)
488
489
489
490
def downloadlfiles(ui, repo):
    """Cache every largefile referenced anywhere in the repository.

    Returns a (cached, missing) pair of counts over all revisions that
    touch the standin directory.
    """
    tonode = repo.changelog.node
    totalsuccess = 0
    totalmissing = 0
    # Only revisions touching the standin directory can reference largefiles.
    for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname):
        success, missing = cachelfiles(ui, repo, tonode(rev))
        totalsuccess += len(success)
        totalmissing += len(missing)
    ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
502
503
503
504
504 def updatelfiles(
505 def updatelfiles(
505 ui, repo, filelist=None, printmessage=None, normallookup=False
506 ui, repo, filelist=None, printmessage=None, normallookup=False
506 ):
507 ):
507 """Update largefiles according to standins in the working directory
508 """Update largefiles according to standins in the working directory
508
509
509 If ``printmessage`` is other than ``None``, it means "print (or
510 If ``printmessage`` is other than ``None``, it means "print (or
510 ignore, for false) message forcibly".
511 ignore, for false) message forcibly".
511 """
512 """
512 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
513 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
513 with repo.wlock():
514 with repo.wlock():
514 lfdirstate = lfutil.openlfdirstate(ui, repo)
515 lfdirstate = lfutil.openlfdirstate(ui, repo)
515 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
516 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
516
517
517 if filelist is not None:
518 if filelist is not None:
518 filelist = set(filelist)
519 filelist = set(filelist)
519 lfiles = [f for f in lfiles if f in filelist]
520 lfiles = [f for f in lfiles if f in filelist]
520
521
521 update = {}
522 update = {}
522 dropped = set()
523 dropped = set()
523 updated, removed = 0, 0
524 updated, removed = 0, 0
524 wvfs = repo.wvfs
525 wvfs = repo.wvfs
525 wctx = repo[None]
526 wctx = repo[None]
526 for lfile in lfiles:
527 for lfile in lfiles:
527 lfileorig = os.path.relpath(
528 lfileorig = os.path.relpath(
528 scmutil.backuppath(ui, repo, lfile), start=repo.root
529 scmutil.backuppath(ui, repo, lfile), start=repo.root
529 )
530 )
530 standin = lfutil.standin(lfile)
531 standin = lfutil.standin(lfile)
531 standinorig = os.path.relpath(
532 standinorig = os.path.relpath(
532 scmutil.backuppath(ui, repo, standin), start=repo.root
533 scmutil.backuppath(ui, repo, standin), start=repo.root
533 )
534 )
534 if wvfs.exists(standin):
535 if wvfs.exists(standin):
535 if wvfs.exists(standinorig) and wvfs.exists(lfile):
536 if wvfs.exists(standinorig) and wvfs.exists(lfile):
536 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
537 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
537 wvfs.unlinkpath(standinorig)
538 wvfs.unlinkpath(standinorig)
538 expecthash = lfutil.readasstandin(wctx[standin])
539 expecthash = lfutil.readasstandin(wctx[standin])
539 if expecthash != b'':
540 if expecthash != b'':
540 if lfile not in wctx: # not switched to normal file
541 if lfile not in wctx: # not switched to normal file
541 if repo.dirstate.get_entry(standin).any_tracked:
542 if repo.dirstate.get_entry(standin).any_tracked:
542 wvfs.unlinkpath(lfile, ignoremissing=True)
543 wvfs.unlinkpath(lfile, ignoremissing=True)
543 else:
544 else:
544 dropped.add(lfile)
545 dropped.add(lfile)
545
546
546 # allocate an entry in largefiles dirstate to prevent
547 # allocate an entry in largefiles dirstate to prevent
547 # lfilesrepo.status() from reporting missing files as
548 # lfilesrepo.status() from reporting missing files as
548 # removed.
549 # removed.
549 lfdirstate.hacky_extension_update_file(
550 lfdirstate.hacky_extension_update_file(
550 lfile,
551 lfile,
551 p1_tracked=True,
552 p1_tracked=True,
552 wc_tracked=True,
553 wc_tracked=True,
553 possibly_dirty=True,
554 possibly_dirty=True,
554 )
555 )
555 update[lfile] = expecthash
556 update[lfile] = expecthash
556 else:
557 else:
557 # Remove lfiles for which the standin is deleted, unless the
558 # Remove lfiles for which the standin is deleted, unless the
558 # lfile is added to the repository again. This happens when a
559 # lfile is added to the repository again. This happens when a
559 # largefile is converted back to a normal file: the standin
560 # largefile is converted back to a normal file: the standin
560 # disappears, but a new (normal) file appears as the lfile.
561 # disappears, but a new (normal) file appears as the lfile.
561 if (
562 if (
562 wvfs.exists(lfile)
563 wvfs.exists(lfile)
563 and repo.dirstate.normalize(lfile) not in wctx
564 and repo.dirstate.normalize(lfile) not in wctx
564 ):
565 ):
565 wvfs.unlinkpath(lfile)
566 wvfs.unlinkpath(lfile)
566 removed += 1
567 removed += 1
567
568
568 # largefile processing might be slow and be interrupted - be prepared
569 # largefile processing might be slow and be interrupted - be prepared
569 lfdirstate.write(repo.currenttransaction())
570 lfdirstate.write(repo.currenttransaction())
570
571
571 if lfiles:
572 if lfiles:
572 lfiles = [f for f in lfiles if f not in dropped]
573 lfiles = [f for f in lfiles if f not in dropped]
573
574
574 for f in dropped:
575 for f in dropped:
575 repo.wvfs.unlinkpath(lfutil.standin(f))
576 repo.wvfs.unlinkpath(lfutil.standin(f))
576 # This needs to happen for dropped files, otherwise they stay in
577 # This needs to happen for dropped files, otherwise they stay in
577 # the M state.
578 # the M state.
578 lfdirstate._map.reset_state(f)
579 lfdirstate._map.reset_state(f)
579
580
580 statuswriter(_(b'getting changed largefiles\n'))
581 statuswriter(_(b'getting changed largefiles\n'))
581 cachelfiles(ui, repo, None, lfiles)
582 cachelfiles(ui, repo, None, lfiles)
582
583
583 for lfile in lfiles:
584 for lfile in lfiles:
584 update1 = 0
585 update1 = 0
585
586
586 expecthash = update.get(lfile)
587 expecthash = update.get(lfile)
587 if expecthash:
588 if expecthash:
588 if not lfutil.copyfromcache(repo, expecthash, lfile):
589 if not lfutil.copyfromcache(repo, expecthash, lfile):
589 # failed ... but already removed and set to normallookup
590 # failed ... but already removed and set to normallookup
590 continue
591 continue
591 # Synchronize largefile dirstate to the last modified
592 # Synchronize largefile dirstate to the last modified
592 # time of the file
593 # time of the file
593 lfdirstate.hacky_extension_update_file(
594 lfdirstate.hacky_extension_update_file(
594 lfile,
595 lfile,
595 p1_tracked=True,
596 p1_tracked=True,
596 wc_tracked=True,
597 wc_tracked=True,
597 )
598 )
598 update1 = 1
599 update1 = 1
599
600
600 # copy the exec mode of largefile standin from the repository's
601 # copy the exec mode of largefile standin from the repository's
601 # dirstate to its state in the lfdirstate.
602 # dirstate to its state in the lfdirstate.
602 standin = lfutil.standin(lfile)
603 standin = lfutil.standin(lfile)
603 if wvfs.exists(standin):
604 if wvfs.exists(standin):
604 # exec is decided by the users permissions using mask 0o100
605 # exec is decided by the users permissions using mask 0o100
605 standinexec = wvfs.stat(standin).st_mode & 0o100
606 standinexec = wvfs.stat(standin).st_mode & 0o100
606 st = wvfs.stat(lfile)
607 st = wvfs.stat(lfile)
607 mode = st.st_mode
608 mode = st.st_mode
608 if standinexec != mode & 0o100:
609 if standinexec != mode & 0o100:
609 # first remove all X bits, then shift all R bits to X
610 # first remove all X bits, then shift all R bits to X
610 mode &= ~0o111
611 mode &= ~0o111
611 if standinexec:
612 if standinexec:
612 mode |= (mode >> 2) & 0o111 & ~util.umask
613 mode |= (mode >> 2) & 0o111 & ~util.umask
613 wvfs.chmod(lfile, mode)
614 wvfs.chmod(lfile, mode)
614 update1 = 1
615 update1 = 1
615
616
616 updated += update1
617 updated += update1
617
618
618 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
619 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
619
620
620 lfdirstate.write(repo.currenttransaction())
621 lfdirstate.write(repo.currenttransaction())
621 if lfiles:
622 if lfiles:
622 statuswriter(
623 statuswriter(
623 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
624 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
624 )
625 )
625
626
626
627
@eh.command(
    b'lfpull',
    [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
    + cmdutil.remoteopts,
    _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
)
def lfpull(ui, repo, source=b"default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Remember where to pull from so the store layer can reach the source.
    repo.lfpullsource = source

    wanted = opts.get('rev', [])
    if not wanted:
        raise error.Abort(_(b'no revisions specified'))

    # Walk each requested revision and fetch its largefiles into the cache,
    # tallying how many were actually retrieved.
    total_cached = 0
    for rev in logcmdutil.revrange(repo, wanted):
        ui.note(_(b'pulling largefiles for revision %d\n') % rev)
        cached, _missing = cachelfiles(ui, repo, rev)
        total_cached += len(cached)
    ui.status(_(b"%d largefiles cached\n") % total_cached)
667
668
668
669
@eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    """upload the file at FILEPATH into the largefile store

    Hashes the file contents, stores the file under that hash in the
    store configured for ``repo``, and writes the hash to the output.
    Returns 0 on success.
    """
    # 'file_hash' rather than 'hash': don't shadow the builtin.
    file_hash = lfutil.hashfile(filepath)
    storefactory.openstore(repo).put(filepath, file_hash)
    ui.write(b'%s\n' % file_hash)
    return 0
General Comments 0
You need to be logged in to leave comments. Login now