##// END OF EJS Templates
largefiles: migrate to new method for getting copy info...
Martin von Zweigbergk -
r41941:a86e2200 default
parent child Browse files
Show More
@@ -1,603 +1,601 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import shutil
15 import shutil
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 from mercurial import (
19 from mercurial import (
20 cmdutil,
20 cmdutil,
21 context,
21 context,
22 error,
22 error,
23 exthelper,
23 exthelper,
24 hg,
24 hg,
25 lock,
25 lock,
26 match as matchmod,
26 match as matchmod,
27 node,
27 node,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 util,
30 util,
31 )
31 )
32
32
33 from ..convert import (
33 from ..convert import (
34 convcmd,
34 convcmd,
35 filemap,
35 filemap,
36 )
36 )
37
37
38 from . import (
38 from . import (
39 lfutil,
39 lfutil,
40 storefactory
40 storefactory
41 )
41 )
42
42
43 release = lock.release
43 release = lock.release
44
44
45 # -- Commands ----------------------------------------------------------
45 # -- Commands ----------------------------------------------------------
46
46
47 eh = exthelper.exthelper()
47 eh = exthelper.exthelper()
48
48
@eh.command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
     _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    opts = pycompat.byteskwargs(opts)
    tolfile = not opts['to_normal']
    if tolfile:
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Walk every changeset in the source via changelog.nodesbetween().
        # A generator keeps memory usage down versus materializing the list.
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock the destination to prevent modification while it is
            # converted to. The source needs no lock: we only read its
            # history, which cannot change under us.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns')
            if pats:
                matcher = matchmod.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            with ui.makeprogress(_('converting revisions'),
                                 unit=_('revisions'),
                                 total=rsrc['tip'].rev()) as progress:
                for ctx in ctxs:
                    progress.update(ctx.rev())
                    _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                        lfiles, normalfiles, matcher, size, lfiletohash)

            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            # Drop working copies of converted largefiles, pruning any
            # directories the removals leave empty.
            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            # Temporarily install our largefile-aware converter while the
            # generic convert machinery runs.
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(ui, src, dest, source_type='hg',
                                dest_type='hg')
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
193
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Convert one source changeset ctx and commit it into rdst.

    Classifies each file touched by ctx as largefile or normal (updating
    the lfiles/normalfiles sets in place), writes standins and working
    copies for largefiles, then commits a memctx to rdst and records the
    new node in revmap. lfiletohash caches the last written hash per
    largefile so unchanged standins are not rewritten.
    '''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                # copysource() returns the copy source path or None; make
                # the check an explicit bool so islfile |= stays boolean.
                renamed = fctx.copysource()
                renamedlfile = renamed is not None and renamed in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.copysource()
                    if renamed and renamed in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                # (sha1() with no argument: passing '' breaks on Python 3,
                # where sha1() rejects str input)
                m = hashlib.sha1()
                m.update(ctx[f].data())
                hash = node.hex(m.digest())
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.copysource()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed)

            return context.memfilectx(repo, memctx, f,
                                      lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(), 'x' in fctx.flags(),
                                      renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
277
277
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    '''Commit a memctx mirroring ctx into rdst and record the new tip in
    revmap under ctx's node.'''
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    newnode = rdst.commitctx(mctx)
    lfutil.copyalltostore(rdst, newnode)
    rdst.setparents(newnode)
    revmap[ctx.node()] = rdst.changelog.tip()
285
285
# Generate list of changed files
def _getchangedfiles(ctx, parents):
    '''Return the set of files changed in ctx, including every file whose
    manifest entry differs from a parent's when ctx is not a root.'''
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        for pctx in ctx.parents():
            # manifest.diff() yields the differing filenames as keys.
            files.update(pctx.manifest().diff(mc))
    return files
295
295
# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    '''Map ctx's parent nodes through revmap, padding with nullid so the
    result always has exactly two entries.'''
    parents = [revmap[p.node()] for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(node.nullid)
    return parents
304
304
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    '''Build a memfilectx for non-largefile f in ctx, or None when f is
    absent there (removed or renamed away).'''
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    copysrc = fctx.copysource()

    data = fctx.data()
    if f == '.hgtags':
        # Tag file contents embed changeset ids that must be remapped.
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(repo, ctx, f, data, 'l' in fctx.flags(),
                              'x' in fctx.flags(), copysrc)
320
318
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    '''Rewrite tag-file data, translating every node id through revmap.
    Malformed or unmapped lines are skipped with a warning.'''
    converted = []
    for line in data.splitlines():
        fields = line.split(' ', 1)
        if len(fields) != 2:
            ui.warn(_('skipping incorrectly formatted tag %s\n')
                    % line)
            continue
        id, name = fields
        try:
            binid = node.bin(id)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n')
                    % id)
            continue
        if binid not in revmap:
            ui.warn(_('no mapping for id %s\n') % id)
            continue
        converted.append('%s %s\n' % (node.hex(revmap[binid]), name))
    return ''.join(converted)
344
342
def _islfile(file, ctx, matcher, size):
    '''Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size.'''
    # never store special .hg* files as largefiles
    if file in ('.hgtags', '.hgignore', '.hgsigs'):
        return False
    if matcher and matcher(file):
        return True
    try:
        # size is in megabytes; compare against the file's byte length.
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        return False
357
355
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    present = store.exists(files)
    # Only hashes the remote store does not already hold need uploading.
    files = [h for h in files if not present[h]]
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    with ui.makeprogress(_('uploading largefiles'), unit=_('files'),
                         total=len(files)) as progress:
        for at, hash in enumerate(files):
            progress.update(at)
            source = lfutil.findfile(rsrc, hash)
            if not source:
                raise error.Abort(_('largefile %s missing from store'
                                    ' (needs to be uploaded)') % hash)
            # XXX check for errors here
            store.put(source, hash)
383
381
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    revs = repo.revs('all()') if all else ['.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
397
395
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)

    ctx = repo[node]
    toget = []
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            continue # node must be None and standin wasn't found in wctx
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if not toget:
        return ([], [])

    store = storefactory.openstore(repo)
    return store.get(toget)
427
425
def downloadlfiles(ui, repo, rev=None):
    '''Cache the largefiles referenced by the given revisions and report
    (totalsuccess, totalmissing) counts.'''
    match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})
    def prepare(ctx, fns):
        pass
    totalsuccess = totalmissing = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        for ctx in cmdutil.walkchangerevs(repo, match, {'rev' : rev},
                                          prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(success)
            totalmissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_("%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
444
442
445 def updatelfiles(ui, repo, filelist=None, printmessage=None,
443 def updatelfiles(ui, repo, filelist=None, printmessage=None,
446 normallookup=False):
444 normallookup=False):
447 '''Update largefiles according to standins in the working directory
445 '''Update largefiles according to standins in the working directory
448
446
449 If ``printmessage`` is other than ``None``, it means "print (or
447 If ``printmessage`` is other than ``None``, it means "print (or
450 ignore, for false) message forcibly".
448 ignore, for false) message forcibly".
451 '''
449 '''
452 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
450 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
453 with repo.wlock():
451 with repo.wlock():
454 lfdirstate = lfutil.openlfdirstate(ui, repo)
452 lfdirstate = lfutil.openlfdirstate(ui, repo)
455 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
453 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
456
454
457 if filelist is not None:
455 if filelist is not None:
458 filelist = set(filelist)
456 filelist = set(filelist)
459 lfiles = [f for f in lfiles if f in filelist]
457 lfiles = [f for f in lfiles if f in filelist]
460
458
461 update = {}
459 update = {}
462 dropped = set()
460 dropped = set()
463 updated, removed = 0, 0
461 updated, removed = 0, 0
464 wvfs = repo.wvfs
462 wvfs = repo.wvfs
465 wctx = repo[None]
463 wctx = repo[None]
466 for lfile in lfiles:
464 for lfile in lfiles:
467 lfileorig = os.path.relpath(
465 lfileorig = os.path.relpath(
468 scmutil.backuppath(ui, repo, lfile),
466 scmutil.backuppath(ui, repo, lfile),
469 start=repo.root)
467 start=repo.root)
470 standin = lfutil.standin(lfile)
468 standin = lfutil.standin(lfile)
471 standinorig = os.path.relpath(
469 standinorig = os.path.relpath(
472 scmutil.backuppath(ui, repo, standin),
470 scmutil.backuppath(ui, repo, standin),
473 start=repo.root)
471 start=repo.root)
474 if wvfs.exists(standin):
472 if wvfs.exists(standin):
475 if (wvfs.exists(standinorig) and
473 if (wvfs.exists(standinorig) and
476 wvfs.exists(lfile)):
474 wvfs.exists(lfile)):
477 shutil.copyfile(wvfs.join(lfile),
475 shutil.copyfile(wvfs.join(lfile),
478 wvfs.join(lfileorig))
476 wvfs.join(lfileorig))
479 wvfs.unlinkpath(standinorig)
477 wvfs.unlinkpath(standinorig)
480 expecthash = lfutil.readasstandin(wctx[standin])
478 expecthash = lfutil.readasstandin(wctx[standin])
481 if expecthash != '':
479 if expecthash != '':
482 if lfile not in wctx: # not switched to normal file
480 if lfile not in wctx: # not switched to normal file
483 if repo.dirstate[standin] != '?':
481 if repo.dirstate[standin] != '?':
484 wvfs.unlinkpath(lfile, ignoremissing=True)
482 wvfs.unlinkpath(lfile, ignoremissing=True)
485 else:
483 else:
486 dropped.add(lfile)
484 dropped.add(lfile)
487
485
488 # use normallookup() to allocate an entry in largefiles
486 # use normallookup() to allocate an entry in largefiles
489 # dirstate to prevent lfilesrepo.status() from reporting
487 # dirstate to prevent lfilesrepo.status() from reporting
490 # missing files as removed.
488 # missing files as removed.
491 lfdirstate.normallookup(lfile)
489 lfdirstate.normallookup(lfile)
492 update[lfile] = expecthash
490 update[lfile] = expecthash
493 else:
491 else:
494 # Remove lfiles for which the standin is deleted, unless the
492 # Remove lfiles for which the standin is deleted, unless the
495 # lfile is added to the repository again. This happens when a
493 # lfile is added to the repository again. This happens when a
496 # largefile is converted back to a normal file: the standin
494 # largefile is converted back to a normal file: the standin
497 # disappears, but a new (normal) file appears as the lfile.
495 # disappears, but a new (normal) file appears as the lfile.
498 if (wvfs.exists(lfile) and
496 if (wvfs.exists(lfile) and
499 repo.dirstate.normalize(lfile) not in wctx):
497 repo.dirstate.normalize(lfile) not in wctx):
500 wvfs.unlinkpath(lfile)
498 wvfs.unlinkpath(lfile)
501 removed += 1
499 removed += 1
502
500
503 # largefile processing might be slow and be interrupted - be prepared
501 # largefile processing might be slow and be interrupted - be prepared
504 lfdirstate.write()
502 lfdirstate.write()
505
503
506 if lfiles:
504 if lfiles:
507 lfiles = [f for f in lfiles if f not in dropped]
505 lfiles = [f for f in lfiles if f not in dropped]
508
506
509 for f in dropped:
507 for f in dropped:
510 repo.wvfs.unlinkpath(lfutil.standin(f))
508 repo.wvfs.unlinkpath(lfutil.standin(f))
511
509
512 # This needs to happen for dropped files, otherwise they stay in
510 # This needs to happen for dropped files, otherwise they stay in
513 # the M state.
511 # the M state.
514 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
512 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
515
513
516 statuswriter(_('getting changed largefiles\n'))
514 statuswriter(_('getting changed largefiles\n'))
517 cachelfiles(ui, repo, None, lfiles)
515 cachelfiles(ui, repo, None, lfiles)
518
516
519 for lfile in lfiles:
517 for lfile in lfiles:
520 update1 = 0
518 update1 = 0
521
519
522 expecthash = update.get(lfile)
520 expecthash = update.get(lfile)
523 if expecthash:
521 if expecthash:
524 if not lfutil.copyfromcache(repo, expecthash, lfile):
522 if not lfutil.copyfromcache(repo, expecthash, lfile):
525 # failed ... but already removed and set to normallookup
523 # failed ... but already removed and set to normallookup
526 continue
524 continue
527 # Synchronize largefile dirstate to the last modified
525 # Synchronize largefile dirstate to the last modified
528 # time of the file
526 # time of the file
529 lfdirstate.normal(lfile)
527 lfdirstate.normal(lfile)
530 update1 = 1
528 update1 = 1
531
529
532 # copy the exec mode of largefile standin from the repository's
530 # copy the exec mode of largefile standin from the repository's
533 # dirstate to its state in the lfdirstate.
531 # dirstate to its state in the lfdirstate.
534 standin = lfutil.standin(lfile)
532 standin = lfutil.standin(lfile)
535 if wvfs.exists(standin):
533 if wvfs.exists(standin):
536 # exec is decided by the users permissions using mask 0o100
534 # exec is decided by the users permissions using mask 0o100
537 standinexec = wvfs.stat(standin).st_mode & 0o100
535 standinexec = wvfs.stat(standin).st_mode & 0o100
538 st = wvfs.stat(lfile)
536 st = wvfs.stat(lfile)
539 mode = st.st_mode
537 mode = st.st_mode
540 if standinexec != mode & 0o100:
538 if standinexec != mode & 0o100:
541 # first remove all X bits, then shift all R bits to X
539 # first remove all X bits, then shift all R bits to X
542 mode &= ~0o111
540 mode &= ~0o111
543 if standinexec:
541 if standinexec:
544 mode |= (mode >> 2) & 0o111 & ~util.umask
542 mode |= (mode >> 2) & 0o111 & ~util.umask
545 wvfs.chmod(lfile, mode)
543 wvfs.chmod(lfile, mode)
546 update1 = 1
544 update1 = 1
547
545
548 updated += update1
546 updated += update1
549
547
550 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
548 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
551
549
552 lfdirstate.write()
550 lfdirstate.write()
553 if lfiles:
551 if lfiles:
554 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
552 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
555 removed))
553 removed))
556
554
@eh.command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + cmdutil.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Remember where to pull from so the store machinery can find it.
    repo.lfpullsource = source

    # At least one revision must be named explicitly; refuse to guess.
    revspec = opts.get(r'rev', [])
    if not revspec:
        raise error.Abort(_('no revisions specified'))

    # Cache the largefiles referenced by each resolved revision and
    # report the total number fetched into the local cache.
    numcached = 0
    for rev in scmutil.revrange(repo, revspec):
        ui.note(_('pulling largefiles for revision %d\n') % rev)
        cached, missing = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_("%d largefiles cached\n") % numcached)
595
593
@eh.command('debuglfput',
    [] + cmdutil.remoteopts,
    _('FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    """upload FILE to the largefile store and print its hash

    Hashes the file at ``filepath``, puts it into the repository's
    configured largefile store under that hash, and writes the hash to
    the ui (one line) so it can be used e.g. as standin content.
    Returns 0 on success.
    """
    # Use 'lfhash' rather than 'hash' so the builtin is not shadowed.
    lfhash = lfutil.hashfile(filepath)
    storefactory.openstore(repo).put(filepath, lfhash)
    ui.write('%s\n' % lfhash)
    return 0
General Comments 0
You need to be logged in to leave comments. Login now