##// END OF EJS Templates
largefiles: add a 'debuglfput' command to put largefile into the store...
Boris Feld -
r35579:4aa6ed59 default
parent child Browse files
Show More
@@ -1,595 +1,604 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import shutil
15 import shutil
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 from mercurial import (
19 from mercurial import (
20 cmdutil,
20 cmdutil,
21 context,
21 context,
22 error,
22 error,
23 hg,
23 hg,
24 lock,
24 lock,
25 match as matchmod,
25 match as matchmod,
26 node,
26 node,
27 pycompat,
27 pycompat,
28 registrar,
28 registrar,
29 scmutil,
29 scmutil,
30 util,
30 util,
31 )
31 )
32
32
33 from ..convert import (
33 from ..convert import (
34 convcmd,
34 convcmd,
35 filemap,
35 filemap,
36 )
36 )
37
37
38 from . import (
38 from . import (
39 lfutil,
39 lfutil,
40 storefactory
40 storefactory
41 )
41 )
42
42
43 release = lock.release
43 release = lock.release
44
44
45 # -- Commands ----------------------------------------------------------
45 # -- Commands ----------------------------------------------------------
46
46
47 cmdtable = {}
47 cmdtable = {}
48 command = registrar.command(cmdtable)
48 command = registrar.command(cmdtable)
49
49
@command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
     _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    opts = pycompat.byteskwargs(opts)
    # direction of the conversion: normal -> largefiles unless --to-normal
    tolfile = not opts['to_normal']
    if tolfile:
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Walk every changeset in the source via changelog.nodesbetween().
        # A generator (not a list) keeps memory usage down on big repos.
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
                                                         rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock the destination to prevent modification while it is
            # converted to.  The source needs no lock because we only read
            # from its history, which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns')
            if pats:
                matcher = matchmod.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                            unit=_('revisions'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # drop the working-copy largefile store and the largefiles
            # themselves; only the standins belong in the destination
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    # directory not empty (or already gone) - fine
                    pass

            # If any file was converted to a largefile, record the
            # 'largefiles' requirement in the destination repository.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            # every largefile must be available locally before converting
            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            # temporarily swap in our converter subclass for convcmd
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(ui, src, dest, source_type='hg', dest_type='hg')
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
193
193
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Convert one source changeset ``ctx`` and commit it to ``rdst``.

    ``lfiles`` and ``normalfiles`` accumulate the classification of files
    seen so far; ``revmap`` maps source nodes to destination nodes and is
    updated with the newly committed changeset; ``lfiletohash`` caches the
    content hash last written for each largefile.
    '''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    # symlinks cannot be largefiles
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                fhash = hashlib.sha1(ctx[f].data()).hexdigest()
                if lfiletohash.get(f) != fhash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, fhash,
                                        executable)
                    lfiletohash[f] = fhash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # callback used by memctx to supply file contents at commit time
        srcfname = lfutil.splitstandin(f)
        if srcfname is None:
            return _getnormalcontext(repo, ctx, f, revmap)

        # if the file isn't in the manifest then it was removed
        # or renamed, return None to indicate this
        try:
            fctx = ctx.filectx(srcfname)
        except error.LookupError:
            return None
        renamed = fctx.renamed()
        if renamed:
            # standin is always a largefile because largefile-ness
            # doesn't change after rename or copy
            renamed = lfutil.standin(renamed[0])

        return context.memfilectx(repo, memctx, f,
                                  lfiletohash[srcfname] + '\n',
                                  'l' in fctx.flags(), 'x' in fctx.flags(),
                                  renamed)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
273
273
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    '''Commit the converted changeset to rdst and record the node mapping.'''
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    # remember which destination node this source changeset became
    revmap[ctx.node()] = rdst.changelog.tip()
281
281
def _getchangedfiles(ctx, parents):
    '''Return the set of files changed in ctx relative to its parents.

    For merge changesets (neither parent is nullid) this also picks up
    files that differ between either parent manifest and the merge result,
    which ctx.files() alone would not report.
    '''
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        # removed in the merge relative to either parent
        files |= (set(mp1) | set(mp2)) - set(mc)
        # changed relative to either parent
        files.update(f for f in mc
                     if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None))
    return files
294
294
def _convertparents(ctx, revmap):
    '''Map ctx's parent nodes through revmap, padded to two with nullid.'''
    parents = [revmap[p.node()] for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(node.nullid)
    return parents
303
303
def _getnormalcontext(repo, ctx, f, revmap):
    '''Return a memfilectx for normal file f in ctx, or None if absent.'''
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        # file was removed or renamed away in this changeset
        return None
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        # tag files reference changeset ids that must be remapped
        data = _converttags (repo.ui, revmap, data)
    return context.memfilectx(repo, ctx, f, data, 'l' in fctx.flags(),
                              'x' in fctx.flags(), renamed)
319
319
def _converttags(ui, revmap, data):
    '''Rewrite .hgtags content, mapping each node id through revmap.

    Malformed lines, unparsable ids, and ids with no mapping are skipped
    with a warning instead of aborting the conversion.
    '''
    newdata = []
    for line in data.splitlines():
        try:
            hexid, name = line.split(' ', 1)
        except ValueError:
            ui.warn(_('skipping incorrectly formatted tag %s\n')
                    % line)
            continue
        try:
            binid = node.bin(hexid)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n')
                    % hexid)
            continue
        try:
            newdata.append('%s %s\n' % (node.hex(revmap[binid]),
                                        name))
        except KeyError:
            # tag points at a changeset that was not converted
            ui.warn(_('no mapping for id %s\n') % hexid)
            continue
    return ''.join(newdata)
343
343
def _islfile(file, ctx, matcher, size):
    '''Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size.'''
    # never store special .hg* files as largefiles
    if file in ('.hgtags', '.hgignore', '.hgsigs'):
        return False
    if matcher and matcher(file):
        return True
    try:
        # size is in megabytes
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        # file not present in this changeset
        return False
356
356
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store

    ``files`` is an iterable of largefile content hashes; hashes already
    present in the remote store are skipped.  Aborts if a needed largefile
    is missing from the local store.
    '''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    # use a list comprehension instead of filter(): on Python 3 filter()
    # returns a one-shot iterator, which would break the len() calls and
    # the iteration below
    files = [h for h in files if not retval[h]]
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    for hash in files:
        ui.progress(_('uploading largefiles'), at, unit=_('files'),
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise error.Abort(_('largefile %s missing from store'
                                ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)
382
382
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    # verify either every revision or just the working parent
    revs = repo.revs('all()') if all else ['.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
396
396
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        # restrict to the requested subset
        lfiles = set(lfiles) & set(filelist)

    ctx = repo[node]
    toget = []
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # node must be None and standin wasn't found in wctx
            continue
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if not toget:
        return ([], [])

    store = storefactory.openstore(repo)
    return store.get(toget)
426
426
def downloadlfiles(ui, repo, rev=None):
    '''Fetch into the local cache every largefile referenced by the given
    revisions (all revisions when ``rev`` is None).

    Returns (number cached, number missing).'''
    match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})

    def prepare(ctx, fns):
        # walkchangerevs requires a prepare callback; nothing to do here
        pass

    totalsuccess = totalmissing = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        for ctx in cmdutil.walkchangerevs(repo, match, {'rev' : rev},
                                          prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(success)
            totalmissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_("%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
443
443
444 def updatelfiles(ui, repo, filelist=None, printmessage=None,
444 def updatelfiles(ui, repo, filelist=None, printmessage=None,
445 normallookup=False):
445 normallookup=False):
446 '''Update largefiles according to standins in the working directory
446 '''Update largefiles according to standins in the working directory
447
447
448 If ``printmessage`` is other than ``None``, it means "print (or
448 If ``printmessage`` is other than ``None``, it means "print (or
449 ignore, for false) message forcibly".
449 ignore, for false) message forcibly".
450 '''
450 '''
451 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
451 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
452 with repo.wlock():
452 with repo.wlock():
453 lfdirstate = lfutil.openlfdirstate(ui, repo)
453 lfdirstate = lfutil.openlfdirstate(ui, repo)
454 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
454 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
455
455
456 if filelist is not None:
456 if filelist is not None:
457 filelist = set(filelist)
457 filelist = set(filelist)
458 lfiles = [f for f in lfiles if f in filelist]
458 lfiles = [f for f in lfiles if f in filelist]
459
459
460 update = {}
460 update = {}
461 dropped = set()
461 dropped = set()
462 updated, removed = 0, 0
462 updated, removed = 0, 0
463 wvfs = repo.wvfs
463 wvfs = repo.wvfs
464 wctx = repo[None]
464 wctx = repo[None]
465 for lfile in lfiles:
465 for lfile in lfiles:
466 rellfile = lfile
466 rellfile = lfile
467 rellfileorig = os.path.relpath(
467 rellfileorig = os.path.relpath(
468 scmutil.origpath(ui, repo, wvfs.join(rellfile)),
468 scmutil.origpath(ui, repo, wvfs.join(rellfile)),
469 start=repo.root)
469 start=repo.root)
470 relstandin = lfutil.standin(lfile)
470 relstandin = lfutil.standin(lfile)
471 relstandinorig = os.path.relpath(
471 relstandinorig = os.path.relpath(
472 scmutil.origpath(ui, repo, wvfs.join(relstandin)),
472 scmutil.origpath(ui, repo, wvfs.join(relstandin)),
473 start=repo.root)
473 start=repo.root)
474 if wvfs.exists(relstandin):
474 if wvfs.exists(relstandin):
475 if (wvfs.exists(relstandinorig) and
475 if (wvfs.exists(relstandinorig) and
476 wvfs.exists(rellfile)):
476 wvfs.exists(rellfile)):
477 shutil.copyfile(wvfs.join(rellfile),
477 shutil.copyfile(wvfs.join(rellfile),
478 wvfs.join(rellfileorig))
478 wvfs.join(rellfileorig))
479 wvfs.unlinkpath(relstandinorig)
479 wvfs.unlinkpath(relstandinorig)
480 expecthash = lfutil.readasstandin(wctx[relstandin])
480 expecthash = lfutil.readasstandin(wctx[relstandin])
481 if expecthash != '':
481 if expecthash != '':
482 if lfile not in wctx: # not switched to normal file
482 if lfile not in wctx: # not switched to normal file
483 if repo.dirstate[relstandin] != '?':
483 if repo.dirstate[relstandin] != '?':
484 wvfs.unlinkpath(rellfile, ignoremissing=True)
484 wvfs.unlinkpath(rellfile, ignoremissing=True)
485 else:
485 else:
486 dropped.add(rellfile)
486 dropped.add(rellfile)
487
487
488 # use normallookup() to allocate an entry in largefiles
488 # use normallookup() to allocate an entry in largefiles
489 # dirstate to prevent lfilesrepo.status() from reporting
489 # dirstate to prevent lfilesrepo.status() from reporting
490 # missing files as removed.
490 # missing files as removed.
491 lfdirstate.normallookup(lfile)
491 lfdirstate.normallookup(lfile)
492 update[lfile] = expecthash
492 update[lfile] = expecthash
493 else:
493 else:
494 # Remove lfiles for which the standin is deleted, unless the
494 # Remove lfiles for which the standin is deleted, unless the
495 # lfile is added to the repository again. This happens when a
495 # lfile is added to the repository again. This happens when a
496 # largefile is converted back to a normal file: the standin
496 # largefile is converted back to a normal file: the standin
497 # disappears, but a new (normal) file appears as the lfile.
497 # disappears, but a new (normal) file appears as the lfile.
498 if (wvfs.exists(rellfile) and
498 if (wvfs.exists(rellfile) and
499 repo.dirstate.normalize(lfile) not in wctx):
499 repo.dirstate.normalize(lfile) not in wctx):
500 wvfs.unlinkpath(rellfile)
500 wvfs.unlinkpath(rellfile)
501 removed += 1
501 removed += 1
502
502
503 # largefile processing might be slow and be interrupted - be prepared
503 # largefile processing might be slow and be interrupted - be prepared
504 lfdirstate.write()
504 lfdirstate.write()
505
505
506 if lfiles:
506 if lfiles:
507 lfiles = [f for f in lfiles if f not in dropped]
507 lfiles = [f for f in lfiles if f not in dropped]
508
508
509 for f in dropped:
509 for f in dropped:
510 repo.wvfs.unlinkpath(lfutil.standin(f))
510 repo.wvfs.unlinkpath(lfutil.standin(f))
511
511
512 # This needs to happen for dropped files, otherwise they stay in
512 # This needs to happen for dropped files, otherwise they stay in
513 # the M state.
513 # the M state.
514 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
514 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
515
515
516 statuswriter(_('getting changed largefiles\n'))
516 statuswriter(_('getting changed largefiles\n'))
517 cachelfiles(ui, repo, None, lfiles)
517 cachelfiles(ui, repo, None, lfiles)
518
518
519 for lfile in lfiles:
519 for lfile in lfiles:
520 update1 = 0
520 update1 = 0
521
521
522 expecthash = update.get(lfile)
522 expecthash = update.get(lfile)
523 if expecthash:
523 if expecthash:
524 if not lfutil.copyfromcache(repo, expecthash, lfile):
524 if not lfutil.copyfromcache(repo, expecthash, lfile):
525 # failed ... but already removed and set to normallookup
525 # failed ... but already removed and set to normallookup
526 continue
526 continue
527 # Synchronize largefile dirstate to the last modified
527 # Synchronize largefile dirstate to the last modified
528 # time of the file
528 # time of the file
529 lfdirstate.normal(lfile)
529 lfdirstate.normal(lfile)
530 update1 = 1
530 update1 = 1
531
531
532 # copy the exec mode of largefile standin from the repository's
532 # copy the exec mode of largefile standin from the repository's
533 # dirstate to its state in the lfdirstate.
533 # dirstate to its state in the lfdirstate.
534 rellfile = lfile
534 rellfile = lfile
535 relstandin = lfutil.standin(lfile)
535 relstandin = lfutil.standin(lfile)
536 if wvfs.exists(relstandin):
536 if wvfs.exists(relstandin):
537 # exec is decided by the users permissions using mask 0o100
537 # exec is decided by the users permissions using mask 0o100
538 standinexec = wvfs.stat(relstandin).st_mode & 0o100
538 standinexec = wvfs.stat(relstandin).st_mode & 0o100
539 st = wvfs.stat(rellfile)
539 st = wvfs.stat(rellfile)
540 mode = st.st_mode
540 mode = st.st_mode
541 if standinexec != mode & 0o100:
541 if standinexec != mode & 0o100:
542 # first remove all X bits, then shift all R bits to X
542 # first remove all X bits, then shift all R bits to X
543 mode &= ~0o111
543 mode &= ~0o111
544 if standinexec:
544 if standinexec:
545 mode |= (mode >> 2) & 0o111 & ~util.umask
545 mode |= (mode >> 2) & 0o111 & ~util.umask
546 wvfs.chmod(rellfile, mode)
546 wvfs.chmod(rellfile, mode)
547 update1 = 1
547 update1 = 1
548
548
549 updated += update1
549 updated += update1
550
550
551 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
551 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
552
552
553 lfdirstate.write()
553 lfdirstate.write()
554 if lfiles:
554 if lfiles:
555 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
555 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
556 removed))
556 removed))
557
557
@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + cmdutil.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Remember where to pull from so the store machinery picks it up.
    repo.lfpullsource = source

    revs = opts.get(r'rev', [])
    if not revs:
        raise error.Abort(_('no revisions specified'))
    revs = scmutil.revrange(repo, revs)

    numcached = 0
    for rev in revs:
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        # cachelfiles returns (cached, missing) for the revision; only the
        # successfully cached files count toward the summary.
        (cached, missing) = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_("%d largefiles cached\n") % numcached)
596
@command('debuglfput',
    [] + cmdutil.remoteopts,
    _('FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    """put the file at FILEPATH into the largefile store

    The file is hashed with lfutil.hashfile, stored under that hash via
    the repository's store, and the hash is printed so it can be used
    (e.g. as standin content). Returns 0 on success.
    """
    # Don't shadow the builtin 'hash'; use a distinct local name.
    sha = lfutil.hashfile(filepath)
    storefactory.openstore(repo).put(filepath, sha)
    ui.write('%s\n' % sha)
    return 0
General Comments 0
You need to be logged in to leave comments. Login now