##// END OF EJS Templates
largefiles: migrate to scmutil.backuppath()...
Martin von Zweigbergk -
r41738:e89e78a7 default
parent child Browse files
Show More
@@ -1,603 +1,603 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import shutil
15 import shutil
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 from mercurial import (
19 from mercurial import (
20 cmdutil,
20 cmdutil,
21 context,
21 context,
22 error,
22 error,
23 exthelper,
23 exthelper,
24 hg,
24 hg,
25 lock,
25 lock,
26 match as matchmod,
26 match as matchmod,
27 node,
27 node,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 util,
30 util,
31 )
31 )
32
32
33 from ..convert import (
33 from ..convert import (
34 convcmd,
34 convcmd,
35 filemap,
35 filemap,
36 )
36 )
37
37
38 from . import (
38 from . import (
39 lfutil,
39 lfutil,
40 storefactory
40 storefactory
41 )
41 )
42
42
# Convenience alias; used in lfconvert()'s finally block to drop the
# destination repo locks.
release = lock.release

# -- Commands ----------------------------------------------------------

# Extension helper; commands below are registered through @eh.command.
eh = exthelper.exthelper()
48
48
@eh.command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
     _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    opts = pycompat.byteskwargs(opts)
    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        # the size threshold is only consulted when converting *to*
        # largefiles, so it is only computed on this branch
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    # on any failure the freshly created destination is removed in the
    # finally block below, so a botched conversion leaves nothing behind
    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do
        # this is to simply walk the changelog, using
        # changelog.nodesbetween().  Take a look at
        # mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is
            # converted to.  Don't need to lock src because we are just
            # reading from its history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns')
            if pats:
                matcher = matchmod.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            # maps largefile name -> content hash of its last converted
            # revision; filled in by _lfconvert_addchangeset()
            lfiletohash = {}
            with ui.makeprogress(_('converting revisions'),
                                 unit=_('revisions'),
                                 total=rsrc['tip'].rev()) as progress:
                for ctx in ctxs:
                    progress.update(ctx.rev())
                    _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                        lfiles, normalfiles, matcher, size, lfiletohash)

            # clean up working-directory leftovers of the conversion
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    # directory not empty (or already gone) - fine
                    pass

            # If there were any files converted to largefiles, add
            # largefiles to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            # converting back to a normal repo: reuse the generic convert
            # machinery, substituting real largefile contents for standins
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            # temporarily monkey-patch convcmd to use our converter class;
            # restored in the finally below even if conversion fails
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(ui, src, dest, source_type='hg',
                                dest_type='hg')
            finally:
                convcmd.converter = orig
            success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            # release in reverse acquisition order (lock, then wlock)
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
193
193
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
                            matcher, size, lfiletohash):
    '''Convert one source changeset ctx and commit it to rdst.

    Mutates its accumulator arguments in place: revmap (src node ->
    dst node, extended via _commitcontext), lfiles / normalfiles
    (classification of every file seen so far) and lfiletohash
    (largefile name -> hash of its last written standin).
    '''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        # classify f once, the first time it is encountered
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                if renamed is None:
                    # the code below assumes renamed to be a boolean or a
                    # list and won't quite work with the value None
                    renamed = False
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    # symlinks cannot be largefiles
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink')
                                          % f)

                # largefile was modified, update standins
                m = hashlib.sha1('')
                m.update(ctx[f].data())
                hash = node.hex(m.digest())
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # commit callback: produce file contents for each dstfiles entry
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            # a standin's content is the largefile's hash plus a newline
            return context.memfilectx(repo, memctx, f,
                                      lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(),
                                      'x' in fctx.flags(),
                                      renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
277
277
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    '''Commit dstfiles to rdst as the converted form of ctx, reusing its
    description, user, date and extras, and record the new tip in revmap.'''
    memctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                            getfilectx, ctx.user(), ctx.date(), ctx.extra())
    newnode = rdst.commitctx(memctx)
    lfutil.copyalltostore(rdst, newnode)
    rdst.setparents(newnode)
    # remember which destination node this source changeset became
    revmap[ctx.node()] = rdst.changelog.tip()
285
285
# Generate list of changed files
def _getchangedfiles(ctx, parents):
    '''Return the set of files that differ between ctx and its parents.'''
    changed = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        # also include anything whose manifest entry differs from either
        # parent (catches merge-only changes that ctx.files() misses)
        for pctx in ctx.parents():
            changed.update(pctx.manifest().diff(mc))
    return changed
295
295
# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    '''Map ctx's parent nodes through revmap, padding with nullid so the
    result always has exactly two entries.'''
    mapped = [revmap[p.node()] for p in ctx.parents()]
    while len(mapped) < 2:
        mapped.append(node.nullid)
    return mapped
304
304
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    '''Return a memfilectx carrying the data of normal file f in ctx, or
    None when f is not present there (removed/renamed).'''
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None

    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        # tag contents embed changeset ids, which must be remapped to
        # the ids of the converted repository
        data = _converttags(repo.ui, revmap, data)
    islink = 'l' in fctx.flags()
    isexec = 'x' in fctx.flags()
    return context.memfilectx(repo, ctx, f, data, islink, isexec, renamed)
320
320
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    '''Rewrite .hgtags data so each line's node id is mapped through
    revmap; malformed or unmapped lines are warned about and dropped.'''
    newlines = []
    for line in data.splitlines():
        try:
            hexid, tagname = line.split(' ', 1)
        except ValueError:
            ui.warn(_('skipping incorrectly formatted tag %s\n')
                    % line)
            continue
        try:
            binid = node.bin(hexid)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n')
                    % hexid)
            continue
        try:
            newlines.append('%s %s\n' % (node.hex(revmap[binid]),
                                         tagname))
        except KeyError:
            # the tagged changeset was not converted
            ui.warn(_('no mapping for id %s\n') % hexid)
            continue
    return ''.join(newlines)
344
344
def _islfile(file, ctx, matcher, size):
    '''Return True if file should be tracked as a largefile: the matcher
    selects it, or its size in ctx is at least size megabytes.'''
    # the special .hg* bookkeeping files are never stored as largefiles
    if file in ('.hgtags', '.hgignore', '.hgsigs'):
        return False
    if matcher and matcher(file):
        return True
    try:
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        # file is absent from this revision
        return False
357
357
def uploadlfiles(ui, rsrc, rdst, files):
    '''Send to the central store every given largefile it is missing.'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    present = store.exists(files)
    # only upload the hashes the remote store does not already have
    files = [h for h in files if not present[h]]
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    with ui.makeprogress(_('uploading largefiles'), unit=_('files'),
                         total=len(files)) as progress:
        for at, fhash in enumerate(files):
            progress.update(at)
            source = lfutil.findfile(rsrc, fhash)
            if not source:
                raise error.Abort(_('largefile %s missing from store'
                                    ' (needs to be uploaded)') % fhash)
            # XXX check for errors here
            store.put(source, fhash)
383
383
def verifylfiles(ui, repo, all=False, contents=False):
    '''Check that every largefile revision of the current changeset exists
    in the central store.  With contents=True also verify each local
    largefile revision's data (its SHA-1 must match the revision ID);
    with all=True check every changeset in the repository.'''
    revs = repo.revs('all()') if all else ['.']
    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
397
397
def cachelfiles(ui, repo, node, filelist=None):
    '''Ensure every largefile needed by revision node is present in the
    repository's largefile cache, optionally restricted to filelist.

    Returns a (cached, missing) pair: the files downloaded by this call
    and the files that were needed but could not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)

    ctx = repo[node]
    needed = []
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # node must be None and the standin wasn't found in wctx
            continue
        if not lfutil.findfile(repo, expectedhash):
            needed.append((lfile, expectedhash))

    if not needed:
        return ([], [])
    store = storefactory.openstore(repo)
    return store.get(needed)
427
427
def downloadlfiles(ui, repo, rev=None):
    '''Cache the largefiles referenced by the given revisions (all
    revisions when rev is None); return (numcached, nummissed) totals.'''
    matcher = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})

    def prepare(ctx, fns):
        # walkchangerevs needs a prepare callback; nothing to do here
        pass

    numcached = 0
    nummissed = 0
    # walkchangerevs on empty list would return all revs
    if rev != []:
        revopts = {'rev': rev}
        for ctx in cmdutil.walkchangerevs(repo, matcher, revopts,
                                          prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            numcached += len(success)
            nummissed += len(missing)
    ui.status(_("%d additional largefiles cached\n") % numcached)
    if nummissed > 0:
        ui.status(_("%d largefiles failed to download\n") % nummissed)
    return numcached, nummissed
444
444
445 def updatelfiles(ui, repo, filelist=None, printmessage=None,
445 def updatelfiles(ui, repo, filelist=None, printmessage=None,
446 normallookup=False):
446 normallookup=False):
447 '''Update largefiles according to standins in the working directory
447 '''Update largefiles according to standins in the working directory
448
448
449 If ``printmessage`` is other than ``None``, it means "print (or
449 If ``printmessage`` is other than ``None``, it means "print (or
450 ignore, for false) message forcibly".
450 ignore, for false) message forcibly".
451 '''
451 '''
452 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
452 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
453 with repo.wlock():
453 with repo.wlock():
454 lfdirstate = lfutil.openlfdirstate(ui, repo)
454 lfdirstate = lfutil.openlfdirstate(ui, repo)
455 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
455 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
456
456
457 if filelist is not None:
457 if filelist is not None:
458 filelist = set(filelist)
458 filelist = set(filelist)
459 lfiles = [f for f in lfiles if f in filelist]
459 lfiles = [f for f in lfiles if f in filelist]
460
460
461 update = {}
461 update = {}
462 dropped = set()
462 dropped = set()
463 updated, removed = 0, 0
463 updated, removed = 0, 0
464 wvfs = repo.wvfs
464 wvfs = repo.wvfs
465 wctx = repo[None]
465 wctx = repo[None]
466 for lfile in lfiles:
466 for lfile in lfiles:
467 lfileorig = os.path.relpath(
467 lfileorig = os.path.relpath(
468 scmutil.origpath(ui, repo, wvfs.join(lfile)),
468 scmutil.backuppath(ui, repo, lfile),
469 start=repo.root)
469 start=repo.root)
470 standin = lfutil.standin(lfile)
470 standin = lfutil.standin(lfile)
471 standinorig = os.path.relpath(
471 standinorig = os.path.relpath(
472 scmutil.origpath(ui, repo, wvfs.join(standin)),
472 scmutil.backuppath(ui, repo, standin),
473 start=repo.root)
473 start=repo.root)
474 if wvfs.exists(standin):
474 if wvfs.exists(standin):
475 if (wvfs.exists(standinorig) and
475 if (wvfs.exists(standinorig) and
476 wvfs.exists(lfile)):
476 wvfs.exists(lfile)):
477 shutil.copyfile(wvfs.join(lfile),
477 shutil.copyfile(wvfs.join(lfile),
478 wvfs.join(lfileorig))
478 wvfs.join(lfileorig))
479 wvfs.unlinkpath(standinorig)
479 wvfs.unlinkpath(standinorig)
480 expecthash = lfutil.readasstandin(wctx[standin])
480 expecthash = lfutil.readasstandin(wctx[standin])
481 if expecthash != '':
481 if expecthash != '':
482 if lfile not in wctx: # not switched to normal file
482 if lfile not in wctx: # not switched to normal file
483 if repo.dirstate[standin] != '?':
483 if repo.dirstate[standin] != '?':
484 wvfs.unlinkpath(lfile, ignoremissing=True)
484 wvfs.unlinkpath(lfile, ignoremissing=True)
485 else:
485 else:
486 dropped.add(lfile)
486 dropped.add(lfile)
487
487
488 # use normallookup() to allocate an entry in largefiles
488 # use normallookup() to allocate an entry in largefiles
489 # dirstate to prevent lfilesrepo.status() from reporting
489 # dirstate to prevent lfilesrepo.status() from reporting
490 # missing files as removed.
490 # missing files as removed.
491 lfdirstate.normallookup(lfile)
491 lfdirstate.normallookup(lfile)
492 update[lfile] = expecthash
492 update[lfile] = expecthash
493 else:
493 else:
494 # Remove lfiles for which the standin is deleted, unless the
494 # Remove lfiles for which the standin is deleted, unless the
495 # lfile is added to the repository again. This happens when a
495 # lfile is added to the repository again. This happens when a
496 # largefile is converted back to a normal file: the standin
496 # largefile is converted back to a normal file: the standin
497 # disappears, but a new (normal) file appears as the lfile.
497 # disappears, but a new (normal) file appears as the lfile.
498 if (wvfs.exists(lfile) and
498 if (wvfs.exists(lfile) and
499 repo.dirstate.normalize(lfile) not in wctx):
499 repo.dirstate.normalize(lfile) not in wctx):
500 wvfs.unlinkpath(lfile)
500 wvfs.unlinkpath(lfile)
501 removed += 1
501 removed += 1
502
502
503 # largefile processing might be slow and be interrupted - be prepared
503 # largefile processing might be slow and be interrupted - be prepared
504 lfdirstate.write()
504 lfdirstate.write()
505
505
506 if lfiles:
506 if lfiles:
507 lfiles = [f for f in lfiles if f not in dropped]
507 lfiles = [f for f in lfiles if f not in dropped]
508
508
509 for f in dropped:
509 for f in dropped:
510 repo.wvfs.unlinkpath(lfutil.standin(f))
510 repo.wvfs.unlinkpath(lfutil.standin(f))
511
511
512 # This needs to happen for dropped files, otherwise they stay in
512 # This needs to happen for dropped files, otherwise they stay in
513 # the M state.
513 # the M state.
514 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
514 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
515
515
516 statuswriter(_('getting changed largefiles\n'))
516 statuswriter(_('getting changed largefiles\n'))
517 cachelfiles(ui, repo, None, lfiles)
517 cachelfiles(ui, repo, None, lfiles)
518
518
519 for lfile in lfiles:
519 for lfile in lfiles:
520 update1 = 0
520 update1 = 0
521
521
522 expecthash = update.get(lfile)
522 expecthash = update.get(lfile)
523 if expecthash:
523 if expecthash:
524 if not lfutil.copyfromcache(repo, expecthash, lfile):
524 if not lfutil.copyfromcache(repo, expecthash, lfile):
525 # failed ... but already removed and set to normallookup
525 # failed ... but already removed and set to normallookup
526 continue
526 continue
527 # Synchronize largefile dirstate to the last modified
527 # Synchronize largefile dirstate to the last modified
528 # time of the file
528 # time of the file
529 lfdirstate.normal(lfile)
529 lfdirstate.normal(lfile)
530 update1 = 1
530 update1 = 1
531
531
532 # copy the exec mode of largefile standin from the repository's
532 # copy the exec mode of largefile standin from the repository's
533 # dirstate to its state in the lfdirstate.
533 # dirstate to its state in the lfdirstate.
534 standin = lfutil.standin(lfile)
534 standin = lfutil.standin(lfile)
535 if wvfs.exists(standin):
535 if wvfs.exists(standin):
536 # exec is decided by the users permissions using mask 0o100
536 # exec is decided by the users permissions using mask 0o100
537 standinexec = wvfs.stat(standin).st_mode & 0o100
537 standinexec = wvfs.stat(standin).st_mode & 0o100
538 st = wvfs.stat(lfile)
538 st = wvfs.stat(lfile)
539 mode = st.st_mode
539 mode = st.st_mode
540 if standinexec != mode & 0o100:
540 if standinexec != mode & 0o100:
541 # first remove all X bits, then shift all R bits to X
541 # first remove all X bits, then shift all R bits to X
542 mode &= ~0o111
542 mode &= ~0o111
543 if standinexec:
543 if standinexec:
544 mode |= (mode >> 2) & 0o111 & ~util.umask
544 mode |= (mode >> 2) & 0o111 & ~util.umask
545 wvfs.chmod(lfile, mode)
545 wvfs.chmod(lfile, mode)
546 update1 = 1
546 update1 = 1
547
547
548 updated += update1
548 updated += update1
549
549
550 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
550 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
551
551
552 lfdirstate.write()
552 lfdirstate.write()
553 if lfiles:
553 if lfiles:
554 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
554 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
555 removed))
555 removed))
556
556
@eh.command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + cmdutil.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Record where largefiles should be fetched from; the store layer
    # consults this attribute when opening the remote store.
    repo.lfpullsource = source

    # --rev is mandatory: without it there is nothing to resolve.
    revspec = opts.get(r'rev', [])
    if not revspec:
        raise error.Abort(_('no revisions specified'))

    # Resolve the revset expressions and cache the largefiles referenced
    # by each resulting revision, counting how many were actually fetched.
    numcached = 0
    for rev in scmutil.revrange(repo, revspec):
        ui.note(_('pulling largefiles for revision %d\n') % rev)
        cached, missing = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_("%d largefiles cached\n") % numcached)
595
595
@eh.command('debuglfput',
    [] + cmdutil.remoteopts,
    _('FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    """upload a file to the largefiles store and print its hash

    Hashes FILE, uploads it to the repository's configured largefiles
    store under that hash, and writes the hash to stdout so it can be
    pasted into a standin file. Returns 0 on success.
    """
    # 'hash' shadowed the builtin of the same name; use a descriptive
    # local instead. lfutil.hashfile() returns the sha1 hex digest that
    # largefiles uses as the store key.
    filehash = lfutil.hashfile(filepath)
    storefactory.openstore(repo).put(filepath, filehash)
    ui.write('%s\n' % filehash)
    return 0
General Comments 0
You need to be logged in to leave comments. Login now