##// END OF EJS Templates
largefiles: remove the first `changing_parents` in `updatelfiles`...
marmoute -
r50913:ef1540c5 default
parent child Browse files
Show More
@@ -1,675 +1,674 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import binascii
11 import binascii
12 import os
12 import os
13 import shutil
13 import shutil
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16 from mercurial.node import (
16 from mercurial.node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 )
19 )
20
20
21 from mercurial import (
21 from mercurial import (
22 cmdutil,
22 cmdutil,
23 context,
23 context,
24 error,
24 error,
25 exthelper,
25 exthelper,
26 hg,
26 hg,
27 lock,
27 lock,
28 logcmdutil,
28 logcmdutil,
29 match as matchmod,
29 match as matchmod,
30 pycompat,
30 pycompat,
31 scmutil,
31 scmutil,
32 util,
32 util,
33 )
33 )
34 from mercurial.utils import hashutil
34 from mercurial.utils import hashutil
35
35
36 from ..convert import (
36 from ..convert import (
37 convcmd,
37 convcmd,
38 filemap,
38 filemap,
39 )
39 )
40
40
41 from . import lfutil, storefactory
41 from . import lfutil, storefactory
42
42
43 release = lock.release
43 release = lock.release
44
44
45 # -- Commands ----------------------------------------------------------
45 # -- Commands ----------------------------------------------------------
46
46
47 eh = exthelper.exthelper()
47 eh = exthelper.exthelper()
48
48
49
49
@eh.command(
    b'lfconvert',
    [
        (
            b's',
            b'size',
            b'',
            _(b'minimum size (MB) for files to be converted as largefiles'),
            b'SIZE',
        ),
        (
            b'',
            b'to-normal',
            False,
            _(b'convert from a largefiles repo to a normal repo'),
        ),
    ],
    _(b'hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True,
)
def lfconvert(ui, src, dest, *pats, **opts):
    """convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all."""

    opts = pycompat.byteskwargs(opts)
    if opts[b'to_normal']:
        tolfile = False
    else:
        tolfile = True
        # the size threshold is only meaningful in the to-largefiles
        # direction, so it is resolved lazily here
        size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_(b'initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do
        # this is to simply walk the changelog, using
        # changelog.nodesbetween(). Take a look at mercurial/revlog.py:639
        # for more details. Use a generator instead of a list to decrease
        # memory usage.
        ctxs = (
            rsrc[ctx]
            for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
        )
        revmap = {rsrc.nullid: rdst.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted
            # to. Don't need to lock src because we are just reading from its
            # history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, b'patterns')
            if pats:
                matcher = matchmod.match(rsrc.root, b'', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            with ui.makeprogress(
                _(b'converting revisions'),
                unit=_(b'revisions'),
                total=rsrc[b'tip'].rev(),
            ) as progress:
                for ctx in ctxs:
                    progress.update(ctx.rev())
                    _lfconvert_addchangeset(
                        rsrc,
                        rdst,
                        ctx,
                        revmap,
                        lfiles,
                        normalfiles,
                        matcher,
                        size,
                        lfiletohash,
                    )

            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            # drop the working-copy standins written during conversion,
            # cleaning up any now-empty directories as well
            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add(b'largefiles')
                scmutil.writereporequirements(rdst)
        else:
            # to-normal direction: reuse the convert extension, substituting
            # real largefile contents for standin hashes on the fly

            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = b'.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (
                        not realname.startswith(lfutil.shortnameslash)
                        or f[0] is None
                    ):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(
                            _(b"missing largefile for '%s' in %s")
                            % (realname, realrev)
                        )
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(
                        ui, src, dest, revmapfile, opts
                    )

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_(b"all largefiles must be present locally"))

            # temporarily swap in our converter subclass for the duration of
            # the conversion
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(
                    ui, src, dest, source_type=b'hg', dest_type=b'hg'
                )
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
227
227
228
228
def _lfconvert_addchangeset(
    rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
):
    """Convert one source changeset ``ctx`` and commit it to ``rdst``.

    ``lfiles``/``normalfiles`` are mutated to remember the classification of
    each file seen so far; ``lfiletohash`` caches the content hash of each
    largefile; ``revmap`` maps source nodes to destination nodes and gains an
    entry for ``ctx`` once committed.
    """
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.copysource()
                if renamed is None:
                    # the code below assumes renamed to be a boolean or a list
                    # and won't quite work with the value None
                    renamed = False
                renamedlfile = renamed and renamed in lfiles
                islfile |= renamedlfile
                if b'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _(b'renamed/copied largefile %s becomes symlink')
                            % f
                        )
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if b'l' in fctx.flags():
                    renamed = fctx.copysource()
                    if renamed and renamed in lfiles:
                        raise error.Abort(
                            _(b'largefile %s becomes symlink') % f
                        )

                # largefile was modified, update standins
                m = hashutil.sha1(b'')
                m.update(ctx[f].data())
                hash = hex(m.digest())
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = b'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash, executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # memctx callback: produce file content for ``f`` in the destination
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.copysource()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed)

            return context.memfilectx(
                repo,
                memctx,
                f,
                lfiletohash[srcfname] + b'\n',
                b'l' in fctx.flags(),
                b'x' in fctx.flags(),
                renamed,
            )
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
320
320
321
321
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit a converted changeset to ``rdst``.

    Builds an in-memory changeset mirroring ``ctx``'s metadata, commits it,
    copies any largefiles into the store, and records the resulting node in
    ``revmap`` under the source node.
    """
    mctx = context.memctx(
        rdst,
        parents,
        ctx.description(),
        dstfiles,
        getfilectx,
        ctx.user(),
        ctx.date(),
        ctx.extra(),
    )
    ret = rdst.commitctx(mctx)
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
337
337
338
338
339 # Generate list of changed files
339 # Generate list of changed files
340 def _getchangedfiles(ctx, parents):
340 def _getchangedfiles(ctx, parents):
341 files = set(ctx.files())
341 files = set(ctx.files())
342 if ctx.repo().nullid not in parents:
342 if ctx.repo().nullid not in parents:
343 mc = ctx.manifest()
343 mc = ctx.manifest()
344 for pctx in ctx.parents():
344 for pctx in ctx.parents():
345 for fn in pctx.manifest().diff(mc):
345 for fn in pctx.manifest().diff(mc):
346 files.add(fn)
346 files.add(fn)
347 return files
347 return files
348
348
349
349
350 # Convert src parents to dst parents
350 # Convert src parents to dst parents
351 def _convertparents(ctx, revmap):
351 def _convertparents(ctx, revmap):
352 parents = []
352 parents = []
353 for p in ctx.parents():
353 for p in ctx.parents():
354 parents.append(revmap[p.node()])
354 parents.append(revmap[p.node()])
355 while len(parents) < 2:
355 while len(parents) < 2:
356 parents.append(ctx.repo().nullid)
356 parents.append(ctx.repo().nullid)
357 return parents
357 return parents
358
358
359
359
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    """Return a memfilectx for the non-largefile ``f`` in ``ctx``.

    Returns ``None`` when ``f`` is absent from ``ctx`` (removed/renamed).
    ``.hgtags`` content has its changeset ids rewritten through ``revmap``.
    """
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.copysource()

    data = fctx.data()
    if f == b'.hgtags':
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(
        repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
    )
374
374
375
375
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    """Rewrite ``.hgtags`` content, mapping each node id through ``revmap``.

    Lines that are malformed, contain an unparsable id, or reference an id
    missing from ``revmap`` are skipped with a warning rather than aborting
    the conversion.
    """
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(b' ', 1)
        except ValueError:
            ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            newid = bin(id)
        except binascii.Error:
            ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            newdata.append(b'%s %s\n' % (hex(revmap[newid]), name))
        except KeyError:
            ui.warn(_(b'no mapping for id %s\n') % id)
            continue
    return b''.join(newdata)
396
396
397
397
398 def _islfile(file, ctx, matcher, size):
398 def _islfile(file, ctx, matcher, size):
399 """Return true if file should be considered a largefile, i.e.
399 """Return true if file should be considered a largefile, i.e.
400 matcher matches it or it is larger than size."""
400 matcher matches it or it is larger than size."""
401 # never store special .hg* files as largefiles
401 # never store special .hg* files as largefiles
402 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
402 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
403 return False
403 return False
404 if matcher and matcher(file):
404 if matcher and matcher(file):
405 return True
405 return True
406 try:
406 try:
407 return ctx.filectx(file).size() >= size * 1024 * 1024
407 return ctx.filectx(file).size() >= size * 1024 * 1024
408 except error.LookupError:
408 except error.LookupError:
409 return False
409 return False
410
410
411
411
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store

    ``files`` is a list of largefile hashes; hashes the destination store
    already has are filtered out via a single batched existence check before
    uploading the rest one by one.
    '''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    files = [h for h in files if not retval[h]]
    ui.debug(b"%d largefiles need to be uploaded\n" % len(files))

    with ui.makeprogress(
        _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
    ) as progress:
        for hash in files:
            progress.update(at)
            source = lfutil.findfile(rsrc, hash)
            if not source:
                raise error.Abort(
                    _(
                        b'largefile %s missing from store'
                        b' (needs to be uploaded)'
                    )
                    % hash
                )
            # XXX check for errors here
            store.put(source, hash)
            at += 1
443
443
444
444
def verifylfiles(ui, repo, all=False, contents=False):
    """Verify that every largefile revision in the current changeset
    exists in the central store.  With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID).  With --all, check every changeset in
    this repository."""
    if all:
        revs = repo.revs(b'all()')
    else:
        revs = [b'.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
458
458
459
459
def cachelfiles(ui, repo, node, filelist=None):
    """cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing).  cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found."""
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        # restrict the work to the requested subset
        lfiles = set(lfiles) & set(filelist)
    toget = []

    ctx = repo[node]
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except FileNotFoundError:
            continue  # node must be None and standin wasn't found in wctx
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if toget:
        store = storefactory.openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])
487
487
488
488
def downloadlfiles(ui, repo):
    """Cache the largefiles of every revision touching the largefiles store.

    Returns ``(totalsuccess, totalmissing)`` — counts of largefiles cached
    and of those that could not be downloaded.
    """
    tonode = repo.changelog.node
    totalsuccess = 0
    totalmissing = 0
    for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname):
        success, missing = cachelfiles(ui, repo, tonode(rev))
        totalsuccess += len(success)
        totalmissing += len(missing)
    ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
501
501
502
502
503 def updatelfiles(
503 def updatelfiles(
504 ui, repo, filelist=None, printmessage=None, normallookup=False
504 ui, repo, filelist=None, printmessage=None, normallookup=False
505 ):
505 ):
506 """Update largefiles according to standins in the working directory
506 """Update largefiles according to standins in the working directory
507
507
508 If ``printmessage`` is other than ``None``, it means "print (or
508 If ``printmessage`` is other than ``None``, it means "print (or
509 ignore, for false) message forcibly".
509 ignore, for false) message forcibly".
510 """
510 """
511 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
511 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
512 with repo.wlock():
512 with repo.wlock():
513 lfdirstate = lfutil.openlfdirstate(ui, repo)
513 lfdirstate = lfutil.openlfdirstate(ui, repo)
514 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
514 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
515
515
516 if filelist is not None:
516 if filelist is not None:
517 filelist = set(filelist)
517 filelist = set(filelist)
518 lfiles = [f for f in lfiles if f in filelist]
518 lfiles = [f for f in lfiles if f in filelist]
519
519
520 with lfdirstate.changing_parents(repo):
520 update = {}
521 update = {}
521 dropped = set()
522 dropped = set()
522 updated, removed = 0, 0
523 updated, removed = 0, 0
523 wvfs = repo.wvfs
524 wvfs = repo.wvfs
524 wctx = repo[None]
525 wctx = repo[None]
525 for lfile in lfiles:
526 for lfile in lfiles:
526 lfileorig = os.path.relpath(
527 lfileorig = os.path.relpath(
527 scmutil.backuppath(ui, repo, lfile), start=repo.root
528 scmutil.backuppath(ui, repo, lfile), start=repo.root
528 )
529 )
529 standin = lfutil.standin(lfile)
530 standin = lfutil.standin(lfile)
530 standinorig = os.path.relpath(
531 standinorig = os.path.relpath(
531 scmutil.backuppath(ui, repo, standin), start=repo.root
532 scmutil.backuppath(ui, repo, standin), start=repo.root
532 )
533 )
533 if wvfs.exists(standin):
534 if wvfs.exists(standin):
534 if wvfs.exists(standinorig) and wvfs.exists(lfile):
535 if wvfs.exists(standinorig) and wvfs.exists(lfile):
535 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
536 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
536 wvfs.unlinkpath(standinorig)
537 wvfs.unlinkpath(standinorig)
537 expecthash = lfutil.readasstandin(wctx[standin])
538 expecthash = lfutil.readasstandin(wctx[standin])
538 if expecthash != b'':
539 if expecthash != b'':
539 if lfile not in wctx: # not switched to normal file
540 if lfile not in wctx: # not switched to normal file
540 if repo.dirstate.get_entry(standin).any_tracked:
541 if repo.dirstate.get_entry(standin).any_tracked:
541 wvfs.unlinkpath(lfile, ignoremissing=True)
542 wvfs.unlinkpath(lfile, ignoremissing=True)
542 else:
543 else:
543 dropped.add(lfile)
544 dropped.add(lfile)
545
544
546 # allocate an entry in largefiles dirstate to prevent
545 # allocate an entry in largefiles dirstate to prevent
547 # lfilesrepo.status() from reporting missing files as
546 # lfilesrepo.status() from reporting missing files as
548 # removed.
547 # removed.
549 lfdirstate.hacky_extension_update_file(
548 lfdirstate.hacky_extension_update_file(
550 lfile,
549 lfile,
551 p1_tracked=True,
550 p1_tracked=True,
552 wc_tracked=True,
551 wc_tracked=True,
553 possibly_dirty=True,
552 possibly_dirty=True,
554 )
553 )
555 update[lfile] = expecthash
554 update[lfile] = expecthash
556 else:
555 else:
557 # Remove lfiles for which the standin is deleted, unless the
556 # Remove lfiles for which the standin is deleted, unless the
558 # lfile is added to the repository again. This happens when a
557 # lfile is added to the repository again. This happens when a
559 # largefile is converted back to a normal file: the standin
558 # largefile is converted back to a normal file: the standin
560 # disappears, but a new (normal) file appears as the lfile.
559 # disappears, but a new (normal) file appears as the lfile.
561 if (
560 if (
562 wvfs.exists(lfile)
561 wvfs.exists(lfile)
563 and repo.dirstate.normalize(lfile) not in wctx
562 and repo.dirstate.normalize(lfile) not in wctx
564 ):
563 ):
565 wvfs.unlinkpath(lfile)
564 wvfs.unlinkpath(lfile)
566 removed += 1
565 removed += 1
567
566
568 # largefile processing might be slow and be interrupted - be prepared
567 # largefile processing might be slow and be interrupted - be prepared
569 lfdirstate.write(repo.currenttransaction())
568 lfdirstate.write(repo.currenttransaction())
570
569
571 if lfiles:
570 if lfiles:
572 lfiles = [f for f in lfiles if f not in dropped]
571 lfiles = [f for f in lfiles if f not in dropped]
573
572
574 for f in dropped:
573 for f in dropped:
575 repo.wvfs.unlinkpath(lfutil.standin(f))
574 repo.wvfs.unlinkpath(lfutil.standin(f))
576 # This needs to happen for dropped files, otherwise they stay in
575 # This needs to happen for dropped files, otherwise they stay in
577 # the M state.
576 # the M state.
578 lfdirstate._map.reset_state(f)
577 lfdirstate._map.reset_state(f)
579
578
580 statuswriter(_(b'getting changed largefiles\n'))
579 statuswriter(_(b'getting changed largefiles\n'))
581 cachelfiles(ui, repo, None, lfiles)
580 cachelfiles(ui, repo, None, lfiles)
582
581
583 with lfdirstate.changing_parents(repo):
582 with lfdirstate.changing_parents(repo):
584 for lfile in lfiles:
583 for lfile in lfiles:
585 update1 = 0
584 update1 = 0
586
585
587 expecthash = update.get(lfile)
586 expecthash = update.get(lfile)
588 if expecthash:
587 if expecthash:
589 if not lfutil.copyfromcache(repo, expecthash, lfile):
588 if not lfutil.copyfromcache(repo, expecthash, lfile):
590 # failed ... but already removed and set to normallookup
589 # failed ... but already removed and set to normallookup
591 continue
590 continue
592 # Synchronize largefile dirstate to the last modified
591 # Synchronize largefile dirstate to the last modified
593 # time of the file
592 # time of the file
594 lfdirstate.hacky_extension_update_file(
593 lfdirstate.hacky_extension_update_file(
595 lfile,
594 lfile,
596 p1_tracked=True,
595 p1_tracked=True,
597 wc_tracked=True,
596 wc_tracked=True,
598 )
597 )
599 update1 = 1
598 update1 = 1
600
599
601 # copy the exec mode of largefile standin from the repository's
600 # copy the exec mode of largefile standin from the repository's
602 # dirstate to its state in the lfdirstate.
601 # dirstate to its state in the lfdirstate.
603 standin = lfutil.standin(lfile)
602 standin = lfutil.standin(lfile)
604 if wvfs.exists(standin):
603 if wvfs.exists(standin):
605 # exec is decided by the users permissions using mask 0o100
604 # exec is decided by the users permissions using mask 0o100
606 standinexec = wvfs.stat(standin).st_mode & 0o100
605 standinexec = wvfs.stat(standin).st_mode & 0o100
607 st = wvfs.stat(lfile)
606 st = wvfs.stat(lfile)
608 mode = st.st_mode
607 mode = st.st_mode
609 if standinexec != mode & 0o100:
608 if standinexec != mode & 0o100:
610 # first remove all X bits, then shift all R bits to X
609 # first remove all X bits, then shift all R bits to X
611 mode &= ~0o111
610 mode &= ~0o111
612 if standinexec:
611 if standinexec:
613 mode |= (mode >> 2) & 0o111 & ~util.umask
612 mode |= (mode >> 2) & 0o111 & ~util.umask
614 wvfs.chmod(lfile, mode)
613 wvfs.chmod(lfile, mode)
615 update1 = 1
614 update1 = 1
616
615
617 updated += update1
616 updated += update1
618
617
619 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
618 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
620
619
621 lfdirstate.write(repo.currenttransaction())
620 lfdirstate.write(repo.currenttransaction())
622 if lfiles:
621 if lfiles:
623 statuswriter(
622 statuswriter(
624 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
623 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
625 )
624 )
626
625
627
626
@eh.command(
    b'lfpull',
    [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
    + cmdutil.remoteopts,
    _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
)
def lfpull(ui, repo, source=b"default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Remember where largefiles should be pulled from for the store layer.
    repo.lfpullsource = source

    revspec = opts.get('rev', [])
    if not revspec:
        raise error.Abort(_(b'no revisions specified'))

    # Resolve the revset expressions into concrete revisions, then cache
    # the largefiles referenced by each one.
    total = 0
    for rev in logcmdutil.revrange(repo, revspec):
        ui.note(_(b'pulling largefiles for revision %d\n') % rev)
        cached, _missing = cachelfiles(ui, repo, rev)
        total += len(cached)
    ui.status(_(b"%d largefiles cached\n") % total)
668
667
669
668
@eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    """put the largefile at FILE into the configured largefiles store

    Hashes the file at ``filepath``, uploads it to the store selected by
    ``storefactory.openstore`` for this repo, and prints the resulting
    hash. Returns 0 on success.
    """
    # Use a distinct name instead of shadowing the builtin ``hash``.
    file_hash = lfutil.hashfile(filepath)
    storefactory.openstore(repo).put(filepath, file_hash)
    ui.write(b'%s\n' % file_hash)
    return 0
General Comments 0
You need to be logged in to leave comments. Login now