##// END OF EJS Templates
largefiles: when setting/clearing x bit on largefiles, don't change other bits...
Mads Kiilerich -
r30141:c01acee3 default
parent child Browse files
Show More
@@ -1,569 +1,573 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import shutil
15 import shutil
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 from mercurial import (
19 from mercurial import (
20 cmdutil,
20 cmdutil,
21 commands,
21 commands,
22 context,
22 context,
23 error,
23 error,
24 hg,
24 hg,
25 lock,
25 lock,
26 match as matchmod,
26 match as matchmod,
27 node,
27 node,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 )
30 )
31
31
32 from ..convert import (
32 from ..convert import (
33 convcmd,
33 convcmd,
34 filemap,
34 filemap,
35 )
35 )
36
36
37 from . import (
37 from . import (
38 lfutil,
38 lfutil,
39 storefactory
39 storefactory
40 )
40 )
41
41
42 release = lock.release
42 release = lock.release
43
43
44 # -- Commands ----------------------------------------------------------
44 # -- Commands ----------------------------------------------------------
45
45
46 cmdtable = {}
46 cmdtable = {}
47 command = cmdutil.command(cmdtable)
47 command = cmdutil.command(cmdtable)
48
48
@command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
     _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        # size threshold only matters in the to-largefiles direction
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    # both ends must be local repositories
    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do
        # this is to simply walk the changelog, using
        # changelog.nodesbetween().  Use a generator instead of a list to
        # decrease memory usage.
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted
            # to.  Don't need to lock src because we are just reading from
            # its history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = matchmod.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revisions'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # clean out the working-copy standin directory and any largefile
            # payloads written into the working directory during conversion
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    # map the standin directory back onto the repo root
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            # every largefile must be available locally before converting
            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(ui, src, dest)
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Convert one source changeset ``ctx`` into the destination repo,
    deciding per file whether it becomes a largefile (standin + store
    payload) or stays a normal file, and commit the result.

    ``lfiles``/``normalfiles`` and ``lfiletohash`` are shared accumulators
    updated in place across changesets; ``revmap`` maps source nodes to the
    corresponding destination nodes.'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    # symlinks are never tracked as largefiles
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                m = hashlib.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # callback used by memctx to fetch each destination file's content
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(), 'x' in fctx.flags(),
                                      renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    '''Commit ``dstfiles`` into ``rdst`` as a memctx mirroring ``ctx``
    (same description, user, date and extra), then record the new tip in
    ``revmap`` and copy any standin payloads into the largefile store.'''
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
# Generate list of changed files
def _getchangedfiles(ctx, parents):
    '''Return the set of files changed in ``ctx``; for merges, also include
    files whose merged result differs from either parent, and files present
    in a parent but absent from the merge.'''
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        # files removed relative to either parent
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            # files whose content differs from either parent
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)
    return files
# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    '''Map the parents of ``ctx`` through ``revmap`` and pad the result
    with nullid so exactly two parents are always returned.'''
    parents = [revmap[p.node()] for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(node.nullid)
    return parents
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    '''Build a memfilectx for non-largefile ``f`` in ``ctx``, returning
    None if the file was removed/renamed (lookup fails).  ``.hgtags``
    content is rewritten so tag nodes point at converted revisions.'''
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        data = _converttags (repo.ui, revmap, data)
    return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
                              'x' in fctx.flags(), renamed)
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    '''Rewrite ``.hgtags`` content so each tag's node id is translated
    through ``revmap``; malformed or unmapped lines are warned about and
    dropped.'''
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(' ', 1)
        except ValueError:
            ui.warn(_('skipping incorrectly formatted tag %s\n')
                % line)
            continue
        try:
            newid = node.bin(id)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n')
                % id)
            continue
        try:
            newdata.append('%s %s\n' % (node.hex(revmap[newid]),
                name))
        except KeyError:
            # tag points at a revision that was not converted
            ui.warn(_('no mapping for id %s\n') % id)
            continue
    return ''.join(newdata)
def _islfile(file, ctx, matcher, size):
    '''Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size.'''
    # never store special .hg* files as largefiles
    if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
        return False
    if matcher and matcher(file):
        return True
    try:
        # size is in MB; compare against the file's byte size
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        # file not present in this changeset
        return False
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    # only upload hashes the remote store does not already have
    files = filter(lambda h: not retval[h], files)
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    for hash in files:
        ui.progress(_('uploading largefiles'), at, unit=_('files'),
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise error.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    if all:
        revs = repo.revs('all()')
    else:
        revs = ['.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)
    toget = []

    for lfile in lfiles:
        try:
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        except IOError as err:
            if err.errno == errno.ENOENT:
                continue # node must be None and standin wasn't found in wctx
            raise
        # queue any hash not already present in the local store/cache
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if toget:
        store = storefactory.openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])
def downloadlfiles(ui, repo, rev=None):
    '''Cache the largefiles referenced by the given revisions (all revisions
    when ``rev`` is None) and report totals.

    Returns a (totalsuccess, totalmissing) pair of counts.'''
    matchfn = scmutil.match(repo[None],
                            [repo.wjoin(lfutil.shortname)], {})
    def prepare(ctx, fns):
        # no per-file preparation needed; we only want the revisions
        pass
    totalsuccess = 0
    totalmissing = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
                                          prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(success)
            totalmissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_("%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        updated, removed = 0, 0
        wvfs = repo.wvfs
        # first pass: decide per largefile what needs fetching/removal
        for lfile in lfiles:
            rellfile = lfile
            rellfileorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(rellfile)),
                start=repo.root)
            relstandin = lfutil.standin(lfile)
            relstandinorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(relstandin)),
                start=repo.root)
            if wvfs.exists(relstandin):
                if (wvfs.exists(relstandinorig) and
                        wvfs.exists(rellfile)):
                    # the standin's .orig backup belongs to the largefile
                    shutil.copyfile(wvfs.join(rellfile),
                                    wvfs.join(rellfileorig))
                    wvfs.unlinkpath(relstandinorig)
                expecthash = lfutil.readstandin(repo, lfile)
                if expecthash != '':
                    if lfile not in repo[None]: # not switched to normal file
                        wvfs.unlinkpath(rellfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (wvfs.exists(rellfile) and
                        repo.dirstate.normalize(lfile) not in repo[None]):
                    wvfs.unlinkpath(rellfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        # second pass: materialize contents and synchronize modes/dirstate
        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            rellfile = lfile
            relstandin = lfutil.standin(lfile)
            if wvfs.exists(relstandin):
                standinexec = wvfs.stat(relstandin).st_mode & 0o100
                st = wvfs.stat(rellfile).st_mode
                if standinexec != st & 0o100:
                    # propagate only the x bits, deriving them from the read
                    # bits masked by umask; leave all other mode bits alone
                    st &= ~0o111
                    if standinexec:
                        st |= (st >> 2) & 0o111 & ~util.umask
                    wvfs.chmod(rellfile, st)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + commands.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Remember where largefiles should be pulled from; the store layer
    # consults repo.lfpullsource when resolving the remote.
    repo.lfpullsource = source

    revspec = opts.get('rev', [])
    if not revspec:
        raise error.Abort(_('no revisions specified'))

    # Resolve the revset and fetch the largefiles referenced by each
    # revision into the local cache, counting how many were downloaded.
    totalcached = 0
    for rev in scmutil.revrange(repo, revspec):
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        cached, missing = cachelfiles(ui, repo, rev)
        totalcached += len(cached)
    ui.status(_("%d largefiles cached\n") % totalcached)
General Comments 0
You need to be logged in to leave comments. Login now