##// END OF EJS Templates
largefiles: explicitly set the source and sink types to 'hg' for lfconvert...
Matt Harbison -
r35171:4abfe416 default
parent child Browse files
Show More
@@ -1,578 +1,578
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import shutil
15 import shutil
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 from mercurial import (
19 from mercurial import (
20 cmdutil,
20 cmdutil,
21 context,
21 context,
22 error,
22 error,
23 hg,
23 hg,
24 lock,
24 lock,
25 match as matchmod,
25 match as matchmod,
26 node,
26 node,
27 registrar,
27 registrar,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 )
30 )
31
31
32 from ..convert import (
32 from ..convert import (
33 convcmd,
33 convcmd,
34 filemap,
34 filemap,
35 )
35 )
36
36
37 from . import (
37 from . import (
38 lfutil,
38 lfutil,
39 storefactory
39 storefactory
40 )
40 )
41
41
# Shortcut for releasing several locks at once (used in lfconvert's cleanup).
release = lock.release

# -- Commands ----------------------------------------------------------

# Command table populated by the @command decorator below; the extension
# loader picks this up to register 'hg lfconvert'.
cmdtable = {}
command = registrar.command(cmdtable)
48
48
@command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
     _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        # size threshold only matters when converting *to* largefiles
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        # maps source nodes to destination nodes as the conversion proceeds
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted to.
            # Don't need to lock src because we are just reading from its
            # history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns')
            if pats:
                matcher = matchmod.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            # caches the hash of each largefile's last-seen content
            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revisions'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # remove working-copy debris left behind by the conversion
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            # Converting back to normal files is delegated to the convert
            # extension, with a source wrapper that substitutes each
            # standin's content (the hash) with the real largefile data.
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    # strip the .hglf/ prefix from converted paths
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            # all largefile data must be available locally before converting
            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            # temporarily monkey-patch convcmd so convert uses our wrapper
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                # pin both repo types to 'hg' so convert doesn't sniff them
                convcmd.convert(ui, src, dest, source_type='hg', dest_type='hg')
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
191
191
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    """Convert one source changeset ctx and commit it into rdst.

    lfiles and normalfiles are running sets of files already classified
    as largefile vs. normal (mutated in place); revmap maps source nodes
    to destination nodes and gains an entry for the new commit;
    lfiletohash caches the sha1 of each largefile's last-written content.
    """
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    # symlinks are never tracked as largefiles
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            # destination commit records the standin, not the file itself
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                m = hashlib.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # memctx callback: supply file content for each dstfile at commit time
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            # standin content is the largefile's hash followed by a newline
            return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(), 'x' in fctx.flags(),
                                      renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
270
270
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit a converted changeset into rdst and record the node mapping."""
    memctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                            getfilectx, ctx.user(), ctx.date(), ctx.extra())
    newnode = rdst.commitctx(memctx)
    # make sure any largefiles written during the commit reach the store
    lfutil.copyalltostore(rdst, newnode)
    rdst.setparents(newnode)
    # remember which destination changeset this source changeset became
    revmap[ctx.node()] = rdst.changelog.tip()
278
278
# Generate list of changed files
def _getchangedfiles(ctx, parents):
    """Return the set of files that differ in ctx relative to its parents."""
    changed = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        # files present in a parent but absent from the merge result
        changed |= (set(mp1) | set(mp2)) - set(mc)
        # files whose content differs from at least one parent
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                changed.add(f)
    return changed
291
291
292 # Convert src parents to dst parents
292 # Convert src parents to dst parents
293 def _convertparents(ctx, revmap):
293 def _convertparents(ctx, revmap):
294 parents = []
294 parents = []
295 for p in ctx.parents():
295 for p in ctx.parents():
296 parents.append(revmap[p.node()])
296 parents.append(revmap[p.node()])
297 while len(parents) < 2:
297 while len(parents) < 2:
298 parents.append(node.nullid)
298 parents.append(node.nullid)
299 return parents
299 return parents
300
300
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    """Build a memfilectx for a non-largefile, or None if f is absent."""
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        # not in this manifest: the file was removed or renamed away
        return None

    copysource = fctx.renamed()
    if copysource:
        copysource = copysource[0]

    data = fctx.data()
    if f == '.hgtags':
        # tag contents reference source nodes; rewrite them to dst nodes
        data = _converttags(repo.ui, revmap, data)
    flags = fctx.flags()
    return context.memfilectx(repo, f, data, 'l' in flags,
                              'x' in flags, copysource)
316
316
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    """Rewrite .hgtags content, mapping source node ids through revmap.

    Malformed lines and ids without a mapping are warned about and
    dropped from the output.
    """
    out = []
    for line in data.splitlines():
        try:
            id, name = line.split(' ', 1)
        except ValueError:
            ui.warn(_('skipping incorrectly formatted tag %s\n')
                    % line)
            continue
        try:
            newid = node.bin(id)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n')
                    % id)
            continue
        try:
            out.append('%s %s\n' % (node.hex(revmap[newid]), name))
        except KeyError:
            # source changeset was not converted (e.g. stripped)
            ui.warn(_('no mapping for id %s\n') % id)
            continue
    return ''.join(out)
340
340
341 def _islfile(file, ctx, matcher, size):
341 def _islfile(file, ctx, matcher, size):
342 '''Return true if file should be considered a largefile, i.e.
342 '''Return true if file should be considered a largefile, i.e.
343 matcher matches it or it is larger than size.'''
343 matcher matches it or it is larger than size.'''
344 # never store special .hg* files as largefiles
344 # never store special .hg* files as largefiles
345 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
345 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
346 return False
346 return False
347 if matcher and matcher(file):
347 if matcher and matcher(file):
348 return True
348 return True
349 try:
349 try:
350 return ctx.filectx(file).size() >= size * 1024 * 1024
350 return ctx.filectx(file).size() >= size * 1024 * 1024
351 except error.LookupError:
351 except error.LookupError:
352 return False
352 return False
353
353
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store

    files is a list of largefile hashes; hashes already present in the
    remote store are skipped. Aborts if a needed largefile is missing
    from the local store.
    '''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    # materialize as a list: a py3 filter() is a one-shot iterator with no
    # len(), but the code below needs len(files) and iterates the sequence
    files = [h for h in files if not retval[h]]
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    for hash in files:
        ui.progress(_('uploading largefiles'), at, unit=_('files'),
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise error.Abort(_('largefile %s missing from store'
                                ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)
379
379
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store.  With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID).  With --all, check every changeset in
    this repository.'''
    # check either every changeset or just the working directory parent
    revs = repo.revs('all()') if all else ['.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
393
393
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)

    ctx = repo[node]
    wanted = []
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            continue # node must be None and standin wasn't found in wctx
        if not lfutil.findfile(repo, expectedhash):
            # not in the local store/cache yet; schedule a download
            wanted.append((lfile, expectedhash))

    if not wanted:
        return ([], [])

    store = storefactory.openstore(repo)
    return store.get(wanted)
423
423
def downloadlfiles(ui, repo, rev=None):
    """Cache every largefile referenced by the given revisions.

    Returns (totalsuccess, totalmissing): counts of largefiles newly
    cached and of those that could not be fetched.
    """
    match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})
    def prepare(ctx, fns):
        pass
    totalsuccess = 0
    totalmissing = 0
    # guard: walkchangerevs on an empty rev list would return all revs
    if rev != []:
        revwalk = cmdutil.walkchangerevs(repo, match, {'rev' : rev},
                                         prepare)
        for ctx in revwalk:
            success, missing = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(success)
            totalmissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_("%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
440
440
441 def updatelfiles(ui, repo, filelist=None, printmessage=None,
441 def updatelfiles(ui, repo, filelist=None, printmessage=None,
442 normallookup=False):
442 normallookup=False):
443 '''Update largefiles according to standins in the working directory
443 '''Update largefiles according to standins in the working directory
444
444
445 If ``printmessage`` is other than ``None``, it means "print (or
445 If ``printmessage`` is other than ``None``, it means "print (or
446 ignore, for false) message forcibly".
446 ignore, for false) message forcibly".
447 '''
447 '''
448 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
448 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
449 with repo.wlock():
449 with repo.wlock():
450 lfdirstate = lfutil.openlfdirstate(ui, repo)
450 lfdirstate = lfutil.openlfdirstate(ui, repo)
451 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
451 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
452
452
453 if filelist is not None:
453 if filelist is not None:
454 filelist = set(filelist)
454 filelist = set(filelist)
455 lfiles = [f for f in lfiles if f in filelist]
455 lfiles = [f for f in lfiles if f in filelist]
456
456
457 update = {}
457 update = {}
458 updated, removed = 0, 0
458 updated, removed = 0, 0
459 wvfs = repo.wvfs
459 wvfs = repo.wvfs
460 wctx = repo[None]
460 wctx = repo[None]
461 for lfile in lfiles:
461 for lfile in lfiles:
462 rellfile = lfile
462 rellfile = lfile
463 rellfileorig = os.path.relpath(
463 rellfileorig = os.path.relpath(
464 scmutil.origpath(ui, repo, wvfs.join(rellfile)),
464 scmutil.origpath(ui, repo, wvfs.join(rellfile)),
465 start=repo.root)
465 start=repo.root)
466 relstandin = lfutil.standin(lfile)
466 relstandin = lfutil.standin(lfile)
467 relstandinorig = os.path.relpath(
467 relstandinorig = os.path.relpath(
468 scmutil.origpath(ui, repo, wvfs.join(relstandin)),
468 scmutil.origpath(ui, repo, wvfs.join(relstandin)),
469 start=repo.root)
469 start=repo.root)
470 if wvfs.exists(relstandin):
470 if wvfs.exists(relstandin):
471 if (wvfs.exists(relstandinorig) and
471 if (wvfs.exists(relstandinorig) and
472 wvfs.exists(rellfile)):
472 wvfs.exists(rellfile)):
473 shutil.copyfile(wvfs.join(rellfile),
473 shutil.copyfile(wvfs.join(rellfile),
474 wvfs.join(rellfileorig))
474 wvfs.join(rellfileorig))
475 wvfs.unlinkpath(relstandinorig)
475 wvfs.unlinkpath(relstandinorig)
476 expecthash = lfutil.readasstandin(wctx[relstandin])
476 expecthash = lfutil.readasstandin(wctx[relstandin])
477 if expecthash != '':
477 if expecthash != '':
478 if lfile not in wctx: # not switched to normal file
478 if lfile not in wctx: # not switched to normal file
479 wvfs.unlinkpath(rellfile, ignoremissing=True)
479 wvfs.unlinkpath(rellfile, ignoremissing=True)
480 # use normallookup() to allocate an entry in largefiles
480 # use normallookup() to allocate an entry in largefiles
481 # dirstate to prevent lfilesrepo.status() from reporting
481 # dirstate to prevent lfilesrepo.status() from reporting
482 # missing files as removed.
482 # missing files as removed.
483 lfdirstate.normallookup(lfile)
483 lfdirstate.normallookup(lfile)
484 update[lfile] = expecthash
484 update[lfile] = expecthash
485 else:
485 else:
486 # Remove lfiles for which the standin is deleted, unless the
486 # Remove lfiles for which the standin is deleted, unless the
487 # lfile is added to the repository again. This happens when a
487 # lfile is added to the repository again. This happens when a
488 # largefile is converted back to a normal file: the standin
488 # largefile is converted back to a normal file: the standin
489 # disappears, but a new (normal) file appears as the lfile.
489 # disappears, but a new (normal) file appears as the lfile.
490 if (wvfs.exists(rellfile) and
490 if (wvfs.exists(rellfile) and
491 repo.dirstate.normalize(lfile) not in wctx):
491 repo.dirstate.normalize(lfile) not in wctx):
492 wvfs.unlinkpath(rellfile)
492 wvfs.unlinkpath(rellfile)
493 removed += 1
493 removed += 1
494
494
495 # largefile processing might be slow and be interrupted - be prepared
495 # largefile processing might be slow and be interrupted - be prepared
496 lfdirstate.write()
496 lfdirstate.write()
497
497
498 if lfiles:
498 if lfiles:
499 statuswriter(_('getting changed largefiles\n'))
499 statuswriter(_('getting changed largefiles\n'))
500 cachelfiles(ui, repo, None, lfiles)
500 cachelfiles(ui, repo, None, lfiles)
501
501
502 for lfile in lfiles:
502 for lfile in lfiles:
503 update1 = 0
503 update1 = 0
504
504
505 expecthash = update.get(lfile)
505 expecthash = update.get(lfile)
506 if expecthash:
506 if expecthash:
507 if not lfutil.copyfromcache(repo, expecthash, lfile):
507 if not lfutil.copyfromcache(repo, expecthash, lfile):
508 # failed ... but already removed and set to normallookup
508 # failed ... but already removed and set to normallookup
509 continue
509 continue
510 # Synchronize largefile dirstate to the last modified
510 # Synchronize largefile dirstate to the last modified
511 # time of the file
511 # time of the file
512 lfdirstate.normal(lfile)
512 lfdirstate.normal(lfile)
513 update1 = 1
513 update1 = 1
514
514
515 # copy the exec mode of largefile standin from the repository's
515 # copy the exec mode of largefile standin from the repository's
516 # dirstate to its state in the lfdirstate.
516 # dirstate to its state in the lfdirstate.
517 rellfile = lfile
517 rellfile = lfile
518 relstandin = lfutil.standin(lfile)
518 relstandin = lfutil.standin(lfile)
519 if wvfs.exists(relstandin):
519 if wvfs.exists(relstandin):
520 # exec is decided by the users permissions using mask 0o100
520 # exec is decided by the users permissions using mask 0o100
521 standinexec = wvfs.stat(relstandin).st_mode & 0o100
521 standinexec = wvfs.stat(relstandin).st_mode & 0o100
522 st = wvfs.stat(rellfile)
522 st = wvfs.stat(rellfile)
523 mode = st.st_mode
523 mode = st.st_mode
524 if standinexec != mode & 0o100:
524 if standinexec != mode & 0o100:
525 # first remove all X bits, then shift all R bits to X
525 # first remove all X bits, then shift all R bits to X
526 mode &= ~0o111
526 mode &= ~0o111
527 if standinexec:
527 if standinexec:
528 mode |= (mode >> 2) & 0o111 & ~util.umask
528 mode |= (mode >> 2) & 0o111 & ~util.umask
529 wvfs.chmod(rellfile, mode)
529 wvfs.chmod(rellfile, mode)
530 update1 = 1
530 update1 = 1
531
531
532 updated += update1
532 updated += update1
533
533
534 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
534 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
535
535
536 lfdirstate.write()
536 lfdirstate.write()
537 if lfiles:
537 if lfiles:
538 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
538 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
539 removed))
539 removed))
540
540
@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + cmdutil.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Remember where to pull from so the store machinery (cachelfiles ->
    # storefactory) resolves the right remote.
    repo.lfpullsource = source

    # At least one revision is mandatory; abort early with a clear message.
    revspec = opts.get('rev', [])
    if not revspec:
        raise error.Abort(_('no revisions specified'))

    # Download the largefiles referenced by each requested revision into
    # the local cache, tallying how many were actually fetched.
    totalcached = 0
    for rev in scmutil.revrange(repo, revspec):
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        cached, missing = cachelfiles(ui, repo, rev)
        totalcached += len(cached)
    ui.status(_("%d largefiles cached\n") % totalcached)
General Comments 0
You need to be logged in to leave comments. Login now