##// END OF EJS Templates
dirstate: use `dirstate.change_files` to scope the change in `lfconvert`...
marmoute -
r50946:9409f294 default
parent child Browse files
Show More
@@ -1,673 +1,674 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import binascii
11 import binascii
12 import os
12 import os
13 import shutil
13 import shutil
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16 from mercurial.node import (
16 from mercurial.node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 )
19 )
20
20
21 from mercurial import (
21 from mercurial import (
22 cmdutil,
22 cmdutil,
23 context,
23 context,
24 error,
24 error,
25 exthelper,
25 exthelper,
26 hg,
26 hg,
27 lock,
27 lock,
28 logcmdutil,
28 logcmdutil,
29 match as matchmod,
29 match as matchmod,
30 pycompat,
30 pycompat,
31 scmutil,
31 scmutil,
32 util,
32 util,
33 )
33 )
34 from mercurial.utils import hashutil
34 from mercurial.utils import hashutil
35
35
36 from ..convert import (
36 from ..convert import (
37 convcmd,
37 convcmd,
38 filemap,
38 filemap,
39 )
39 )
40
40
41 from . import lfutil, storefactory
41 from . import lfutil, storefactory
42
42
43 release = lock.release
43 release = lock.release
44
44
45 # -- Commands ----------------------------------------------------------
45 # -- Commands ----------------------------------------------------------
46
46
47 eh = exthelper.exthelper()
47 eh = exthelper.exthelper()
48
48
49
49
@eh.command(
    b'lfconvert',
    [
        (
            b's',
            b'size',
            b'',
            _(b'minimum size (MB) for files to be converted as largefiles'),
            b'SIZE',
        ),
        (
            b'',
            b'to-normal',
            False,
            _(b'convert from a largefiles repo to a normal repo'),
        ),
    ],
    _(b'hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True,
)
def lfconvert(ui, src, dest, *pats, **opts):
    """convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all."""

    opts = pycompat.byteskwargs(opts)
    if opts[b'to_normal']:
        tolfile = False
    else:
        tolfile = True
        # size threshold only matters in the to-largefiles direction
        size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_(b'initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (
            rsrc[ctx]
            for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
        )
        revmap = {rsrc.nullid: rdst.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted to.
            # Don't need to lock src because we are just reading from its
            # history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, b'patterns')
            if pats:
                matcher = matchmod.match(rsrc.root, b'', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            with ui.makeprogress(
                _(b'converting revisions'),
                unit=_(b'revisions'),
                total=rsrc[b'tip'].rev(),
            ) as progress:
                for ctx in ctxs:
                    progress.update(ctx.rev())
                    _lfconvert_addchangeset(
                        rsrc,
                        rdst,
                        ctx,
                        revmap,
                        lfiles,
                        normalfiles,
                        matcher,
                        size,
                        lfiletohash,
                    )

            # drop working-copy leftovers produced while writing standins
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add(b'largefiles')
                scmutil.writereporequirements(rdst)
        else:

            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = b'.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (
                        not realname.startswith(lfutil.shortnameslash)
                        or f[0] is None
                    ):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(
                            _(b"missing largefile for '%s' in %s")
                            % (realname, realrev)
                        )
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(
                        ui, src, dest, revmapfile, opts
                    )

            # every largefile must be locally available before converting back
            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_(b"all largefiles must be present locally"))

            # temporarily swap in our converter subclass for convcmd.convert
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(
                    ui, src, dest, source_type=b'hg', dest_type=b'hg'
                )
            finally:
                convcmd.converter = orig
            success = True
    finally:
        if tolfile:
            # scope the dirstate wipe in a changing_files context so the
            # dirstate change is properly recorded (r50946)
            with rdst.dirstate.changing_files(rdst):
                rdst.dirstate.clear()
        release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
227
228
228
229
def _lfconvert_addchangeset(
    rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
):
    """Convert one source changeset ``ctx`` into the destination repo.

    Classifies each changed file as largefile or normal (the decision is
    cached across calls in the ``lfiles``/``normalfiles`` sets), writes
    standins and working-copy data for largefiles, and commits a mirror
    changeset in ``rdst``; ``revmap`` maps source nodes to dest nodes and
    is updated by the final commit.  ``lfiletohash`` caches the content
    hash last written for each largefile to avoid rewriting standins.
    """
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.copysource()
                if renamed is None:
                    # the code below assumes renamed to be a boolean or a list
                    # and won't quite work with the value None
                    renamed = False
                renamedlfile = renamed and renamed in lfiles
                islfile |= renamedlfile
                if b'l' in fctx.flags():
                    # a symlink can never be a largefile; a largefile that
                    # became a symlink via rename/copy is an error
                    if renamedlfile:
                        raise error.Abort(
                            _(b'renamed/copied largefile %s becomes symlink')
                            % f
                        )
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if b'l' in fctx.flags():
                    renamed = fctx.copysource()
                    if renamed and renamed in lfiles:
                        raise error.Abort(
                            _(b'largefile %s becomes symlink') % f
                        )

                # largefile was modified, update standins
                m = hashutil.sha1(b'')
                m.update(ctx[f].data())
                hash = hex(m.digest())
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = b'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash, executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # callback used by the memctx commit below to produce file contents
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.copysource()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed)

            return context.memfilectx(
                repo,
                memctx,
                f,
                lfiletohash[srcfname] + b'\n',
                b'l' in fctx.flags(),
                b'x' in fctx.flags(),
                renamed,
            )
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
320
321
321
322
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit a memctx mirroring ``ctx`` into ``rdst``.

    Copies any resulting largefiles to the store, moves the working
    directory parent forward, and records the new tip in ``revmap``.
    """
    memctx = context.memctx(
        rdst,
        parents,
        ctx.description(),
        dstfiles,
        getfilectx,
        ctx.user(),
        ctx.date(),
        ctx.extra(),
    )
    newnode = rdst.commitctx(memctx)
    lfutil.copyalltostore(rdst, newnode)
    rdst.setparents(newnode)
    revmap[ctx.node()] = rdst.changelog.tip()
337
338
338
339
339 # Generate list of changed files
340 # Generate list of changed files
340 def _getchangedfiles(ctx, parents):
341 def _getchangedfiles(ctx, parents):
341 files = set(ctx.files())
342 files = set(ctx.files())
342 if ctx.repo().nullid not in parents:
343 if ctx.repo().nullid not in parents:
343 mc = ctx.manifest()
344 mc = ctx.manifest()
344 for pctx in ctx.parents():
345 for pctx in ctx.parents():
345 for fn in pctx.manifest().diff(mc):
346 for fn in pctx.manifest().diff(mc):
346 files.add(fn)
347 files.add(fn)
347 return files
348 return files
348
349
349
350
350 # Convert src parents to dst parents
351 # Convert src parents to dst parents
351 def _convertparents(ctx, revmap):
352 def _convertparents(ctx, revmap):
352 parents = []
353 parents = []
353 for p in ctx.parents():
354 for p in ctx.parents():
354 parents.append(revmap[p.node()])
355 parents.append(revmap[p.node()])
355 while len(parents) < 2:
356 while len(parents) < 2:
356 parents.append(ctx.repo().nullid)
357 parents.append(ctx.repo().nullid)
357 return parents
358 return parents
358
359
359
360
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    """Build a memfilectx for the normal (non-largefile) file ``f``.

    Returns None when the file is absent from ``ctx`` (removed or
    renamed).  The contents of .hgtags are rewritten through ``revmap``
    so tags point at destination nodes.
    """
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    copysrc = fctx.copysource()

    data = fctx.data()
    if f == b'.hgtags':
        data = _converttags(repo.ui, revmap, data)
    flags = fctx.flags()
    return context.memfilectx(
        repo, ctx, f, data, b'l' in flags, b'x' in flags, copysrc
    )
374
375
375
376
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    """Rewrite a .hgtags blob, mapping source node ids through ``revmap``.

    Lines that are malformed, carry an unparsable id, or reference a node
    with no mapping are skipped with a warning.
    """
    out = []
    for line in data.splitlines():
        try:
            id, name = line.split(b' ', 1)
        except ValueError:
            ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            oldnode = bin(id)
        except binascii.Error:
            ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            newnode = revmap[oldnode]
        except KeyError:
            ui.warn(_(b'no mapping for id %s\n') % id)
            continue
        out.append(b'%s %s\n' % (hex(newnode), name))
    return b''.join(out)
396
397
397
398
398 def _islfile(file, ctx, matcher, size):
399 def _islfile(file, ctx, matcher, size):
399 """Return true if file should be considered a largefile, i.e.
400 """Return true if file should be considered a largefile, i.e.
400 matcher matches it or it is larger than size."""
401 matcher matches it or it is larger than size."""
401 # never store special .hg* files as largefiles
402 # never store special .hg* files as largefiles
402 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
403 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
403 return False
404 return False
404 if matcher and matcher(file):
405 if matcher and matcher(file):
405 return True
406 return True
406 try:
407 try:
407 return ctx.filectx(file).size() >= size * 1024 * 1024
408 return ctx.filectx(file).size() >= size * 1024 * 1024
408 except error.LookupError:
409 except error.LookupError:
409 return False
410 return False
410
411
411
412
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    # ask the remote which hashes it already has, then upload the rest
    ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
    present = store.exists(files)
    files = [h for h in files if not present[h]]
    ui.debug(b"%d largefiles need to be uploaded\n" % len(files))

    with ui.makeprogress(
        _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
    ) as progress:
        for at, hash in enumerate(files):
            progress.update(at)
            source = lfutil.findfile(rsrc, hash)
            if not source:
                raise error.Abort(
                    _(
                        b'largefile %s missing from store'
                        b' (needs to be uploaded)'
                    )
                    % hash
                )
            # XXX check for errors here
            store.put(source, hash)
443
444
444
445
def verifylfiles(ui, repo, all=False, contents=False):
    """Verify that every largefile revision in the current changeset
    exists in the central store.  With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID).  With --all, check every changeset in
    this repository."""
    revs = repo.revs(b'all()') if all else [b'.']
    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
458
459
459
460
def cachelfiles(ui, repo, node, filelist=None):
    """cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing).  cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found."""
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)

    ctx = repo[node]
    toget = []
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except FileNotFoundError:
            # node must be None and standin wasn't found in wctx
            continue
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if not toget:
        return ([], [])
    store = storefactory.openstore(repo)
    return store.get(toget)
487
488
488
489
def downloadlfiles(ui, repo):
    """Cache every largefile referenced anywhere in the repo's history.

    Returns a ``(successes, misses)`` pair of counts.
    """
    tonode = repo.changelog.node
    totalsuccess = 0
    totalmissing = 0
    # only walk revisions that touch something under the standin directory
    for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname):
        success, missing = cachelfiles(ui, repo, tonode(rev))
        totalsuccess += len(success)
        totalmissing += len(missing)
    ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
501
502
502
503
503 def updatelfiles(
504 def updatelfiles(
504 ui, repo, filelist=None, printmessage=None, normallookup=False
505 ui, repo, filelist=None, printmessage=None, normallookup=False
505 ):
506 ):
506 """Update largefiles according to standins in the working directory
507 """Update largefiles according to standins in the working directory
507
508
508 If ``printmessage`` is other than ``None``, it means "print (or
509 If ``printmessage`` is other than ``None``, it means "print (or
509 ignore, for false) message forcibly".
510 ignore, for false) message forcibly".
510 """
511 """
511 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
512 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
512 with repo.wlock():
513 with repo.wlock():
513 lfdirstate = lfutil.openlfdirstate(ui, repo)
514 lfdirstate = lfutil.openlfdirstate(ui, repo)
514 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
515 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
515
516
516 if filelist is not None:
517 if filelist is not None:
517 filelist = set(filelist)
518 filelist = set(filelist)
518 lfiles = [f for f in lfiles if f in filelist]
519 lfiles = [f for f in lfiles if f in filelist]
519
520
520 update = {}
521 update = {}
521 dropped = set()
522 dropped = set()
522 updated, removed = 0, 0
523 updated, removed = 0, 0
523 wvfs = repo.wvfs
524 wvfs = repo.wvfs
524 wctx = repo[None]
525 wctx = repo[None]
525 for lfile in lfiles:
526 for lfile in lfiles:
526 lfileorig = os.path.relpath(
527 lfileorig = os.path.relpath(
527 scmutil.backuppath(ui, repo, lfile), start=repo.root
528 scmutil.backuppath(ui, repo, lfile), start=repo.root
528 )
529 )
529 standin = lfutil.standin(lfile)
530 standin = lfutil.standin(lfile)
530 standinorig = os.path.relpath(
531 standinorig = os.path.relpath(
531 scmutil.backuppath(ui, repo, standin), start=repo.root
532 scmutil.backuppath(ui, repo, standin), start=repo.root
532 )
533 )
533 if wvfs.exists(standin):
534 if wvfs.exists(standin):
534 if wvfs.exists(standinorig) and wvfs.exists(lfile):
535 if wvfs.exists(standinorig) and wvfs.exists(lfile):
535 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
536 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
536 wvfs.unlinkpath(standinorig)
537 wvfs.unlinkpath(standinorig)
537 expecthash = lfutil.readasstandin(wctx[standin])
538 expecthash = lfutil.readasstandin(wctx[standin])
538 if expecthash != b'':
539 if expecthash != b'':
539 if lfile not in wctx: # not switched to normal file
540 if lfile not in wctx: # not switched to normal file
540 if repo.dirstate.get_entry(standin).any_tracked:
541 if repo.dirstate.get_entry(standin).any_tracked:
541 wvfs.unlinkpath(lfile, ignoremissing=True)
542 wvfs.unlinkpath(lfile, ignoremissing=True)
542 else:
543 else:
543 dropped.add(lfile)
544 dropped.add(lfile)
544
545
545 # allocate an entry in largefiles dirstate to prevent
546 # allocate an entry in largefiles dirstate to prevent
546 # lfilesrepo.status() from reporting missing files as
547 # lfilesrepo.status() from reporting missing files as
547 # removed.
548 # removed.
548 lfdirstate.hacky_extension_update_file(
549 lfdirstate.hacky_extension_update_file(
549 lfile,
550 lfile,
550 p1_tracked=True,
551 p1_tracked=True,
551 wc_tracked=True,
552 wc_tracked=True,
552 possibly_dirty=True,
553 possibly_dirty=True,
553 )
554 )
554 update[lfile] = expecthash
555 update[lfile] = expecthash
555 else:
556 else:
556 # Remove lfiles for which the standin is deleted, unless the
557 # Remove lfiles for which the standin is deleted, unless the
557 # lfile is added to the repository again. This happens when a
558 # lfile is added to the repository again. This happens when a
558 # largefile is converted back to a normal file: the standin
559 # largefile is converted back to a normal file: the standin
559 # disappears, but a new (normal) file appears as the lfile.
560 # disappears, but a new (normal) file appears as the lfile.
560 if (
561 if (
561 wvfs.exists(lfile)
562 wvfs.exists(lfile)
562 and repo.dirstate.normalize(lfile) not in wctx
563 and repo.dirstate.normalize(lfile) not in wctx
563 ):
564 ):
564 wvfs.unlinkpath(lfile)
565 wvfs.unlinkpath(lfile)
565 removed += 1
566 removed += 1
566
567
567 # largefile processing might be slow and be interrupted - be prepared
568 # largefile processing might be slow and be interrupted - be prepared
568 lfdirstate.write(repo.currenttransaction())
569 lfdirstate.write(repo.currenttransaction())
569
570
570 if lfiles:
571 if lfiles:
571 lfiles = [f for f in lfiles if f not in dropped]
572 lfiles = [f for f in lfiles if f not in dropped]
572
573
573 for f in dropped:
574 for f in dropped:
574 repo.wvfs.unlinkpath(lfutil.standin(f))
575 repo.wvfs.unlinkpath(lfutil.standin(f))
575 # This needs to happen for dropped files, otherwise they stay in
576 # This needs to happen for dropped files, otherwise they stay in
576 # the M state.
577 # the M state.
577 lfdirstate._map.reset_state(f)
578 lfdirstate._map.reset_state(f)
578
579
579 statuswriter(_(b'getting changed largefiles\n'))
580 statuswriter(_(b'getting changed largefiles\n'))
580 cachelfiles(ui, repo, None, lfiles)
581 cachelfiles(ui, repo, None, lfiles)
581
582
582 for lfile in lfiles:
583 for lfile in lfiles:
583 update1 = 0
584 update1 = 0
584
585
585 expecthash = update.get(lfile)
586 expecthash = update.get(lfile)
586 if expecthash:
587 if expecthash:
587 if not lfutil.copyfromcache(repo, expecthash, lfile):
588 if not lfutil.copyfromcache(repo, expecthash, lfile):
588 # failed ... but already removed and set to normallookup
589 # failed ... but already removed and set to normallookup
589 continue
590 continue
590 # Synchronize largefile dirstate to the last modified
591 # Synchronize largefile dirstate to the last modified
591 # time of the file
592 # time of the file
592 lfdirstate.hacky_extension_update_file(
593 lfdirstate.hacky_extension_update_file(
593 lfile,
594 lfile,
594 p1_tracked=True,
595 p1_tracked=True,
595 wc_tracked=True,
596 wc_tracked=True,
596 )
597 )
597 update1 = 1
598 update1 = 1
598
599
599 # copy the exec mode of largefile standin from the repository's
600 # copy the exec mode of largefile standin from the repository's
600 # dirstate to its state in the lfdirstate.
601 # dirstate to its state in the lfdirstate.
601 standin = lfutil.standin(lfile)
602 standin = lfutil.standin(lfile)
602 if wvfs.exists(standin):
603 if wvfs.exists(standin):
603 # exec is decided by the users permissions using mask 0o100
604 # exec is decided by the users permissions using mask 0o100
604 standinexec = wvfs.stat(standin).st_mode & 0o100
605 standinexec = wvfs.stat(standin).st_mode & 0o100
605 st = wvfs.stat(lfile)
606 st = wvfs.stat(lfile)
606 mode = st.st_mode
607 mode = st.st_mode
607 if standinexec != mode & 0o100:
608 if standinexec != mode & 0o100:
608 # first remove all X bits, then shift all R bits to X
609 # first remove all X bits, then shift all R bits to X
609 mode &= ~0o111
610 mode &= ~0o111
610 if standinexec:
611 if standinexec:
611 mode |= (mode >> 2) & 0o111 & ~util.umask
612 mode |= (mode >> 2) & 0o111 & ~util.umask
612 wvfs.chmod(lfile, mode)
613 wvfs.chmod(lfile, mode)
613 update1 = 1
614 update1 = 1
614
615
615 updated += update1
616 updated += update1
616
617
617 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
618 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
618
619
619 lfdirstate.write(repo.currenttransaction())
620 lfdirstate.write(repo.currenttransaction())
620 if lfiles:
621 if lfiles:
621 statuswriter(
622 statuswriter(
622 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
623 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
623 )
624 )
624
625
625
626
@eh.command(
    b'lfpull',
    [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
    + cmdutil.remoteopts,
    _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
)
def lfpull(ui, repo, source=b"default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Record the pull source so the largefiles store layer knows where
    # to fetch blobs from.
    repo.lfpullsource = source

    revspec = opts.get('rev', [])
    if not revspec:
        raise error.Abort(_(b'no revisions specified'))

    # Resolve the revset(s) and cache the largefiles referenced by each
    # resulting revision, counting how many blobs were actually fetched.
    total_cached = 0
    for rev in logcmdutil.revrange(repo, revspec):
        ui.note(_(b'pulling largefiles for revision %d\n') % rev)
        cached, _missing = cachelfiles(ui, repo, rev)
        total_cached += len(cached)
    ui.status(_(b"%d largefiles cached\n") % total_cached)
666
667
667
668
@eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    """put a largefile into the store and print its hash

    Hashes the file at ``filepath``, uploads it into the repository's
    largefile store, and writes the hex hash to stdout.  Returns 0.
    """
    # Renamed from `hash` to avoid shadowing the `hash` builtin.
    file_hash = lfutil.hashfile(filepath)
    storefactory.openstore(repo).put(filepath, file_hash)
    ui.write(b'%s\n' % file_hash)
    return 0
General Comments 0
You need to be logged in to leave comments. Login now