##// END OF EJS Templates
largefiles: rename match_ to matchmod import in lfcommands
liscju -
r29317:5ec25534 default
parent child Browse files
Show More
@@ -1,568 +1,568 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 from mercurial import (
18 from mercurial import (
19 cmdutil,
19 cmdutil,
20 commands,
20 commands,
21 context,
21 context,
22 error,
22 error,
23 hg,
23 hg,
24 lock,
24 lock,
25 match as match_,
25 match as matchmod,
26 node,
26 node,
27 scmutil,
27 scmutil,
28 util,
28 util,
29 )
29 )
30
30
31 from ..convert import (
31 from ..convert import (
32 convcmd,
32 convcmd,
33 filemap,
33 filemap,
34 )
34 )
35
35
36 from . import (
36 from . import (
37 lfutil,
37 lfutil,
38 storefactory
38 storefactory
39 )
39 )
40
40
# Shorthand used by lfconvert's finally-block to release both dst locks.
release = lock.release

# -- Commands ----------------------------------------------------------

# Table of commands this module registers; the @command decorator below
# (built by cmdutil.command) adds each decorated function to it.
cmdtable = {}
command = cmdutil.command(cmdtable)
47
47
@command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
     _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        # NOTE: 'size' is only bound on this branch; the to-normal path
        # below never reads it (only _lfconvert_addchangeset uses it).
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    # Both repositories must be local: the conversion walks their stores
    # directly.
    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    # 'success' gates the cleanup below: on any failure the freshly
    # created destination directory is removed.
    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do
        # this is to simply walk the changelog, using
        # changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        # revmap: source node -> destination node, seeded with the null
        # revision mapping to itself.
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted
            # to. Don't need to lock src because we are just reading from its
            # history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            # Running classification of every file seen so far, shared
            # across revisions via _lfconvert_addchangeset.
            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = matchmod.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revisions'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # Remove working-copy leftovers from the conversion: the
            # .hglf standin tree and any largefiles written via wwrite.
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    # directory not empty (or already gone) - leave it
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            # to-normal conversion: drive the convert extension with a
            # source wrapper that replaces each standin's hash content
            # with the actual largefile data.
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    # map the .hglf/ prefix away so standins land at
                    # their real paths in the destination
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            # every referenced largefile must be in the local store before
            # conversion can start
            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            # temporarily swap in our converter subclass for the duration
            # of the convert run
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(ui, src, dest)
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
190
190
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Convert one source changeset ctx into the destination repo rdst.

    Mutates its accumulator arguments across calls:
    - lfiles / normalfiles: running largefile-vs-normal classification,
    - lfiletohash: largefile path -> content hash of its latest version,
    - revmap: source node -> destination node (updated by _commitcontext).
    '''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        # classify a file the first time we encounter it; the decision
        # then sticks for the whole conversion
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    # symlinks cannot be largefiles
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            # the destination tracks the standin, not the file itself
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # file-content callback for the memctx commit below; closes over
        # ctx, lfiletohash and revmap
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            # standin content is the hash plus a trailing newline
            return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
268
268
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    '''Commit the converted changeset into rdst and record the mapping.

    Builds an in-memory changeset that mirrors ctx's metadata
    (description, user, date, extra), commits it, copies largefiles into
    the store, advances the dirstate parents, and maps ctx's source node
    to the new destination tip in revmap.
    '''
    memctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                            getfilectx, ctx.user(), ctx.date(), ctx.extra())
    newnode = rdst.commitctx(memctx)
    lfutil.copyalltostore(rdst, newnode)
    rdst.setparents(newnode)
    revmap[ctx.node()] = rdst.changelog.tip()
276
276
# Generate list of changed files
def _getchangedfiles(ctx, parents):
    '''Return the set of files to process for ctx.

    Starts from ctx.files(); for merges (no null parent) additionally
    includes files present in either parent but absent from ctx, and
    files whose manifest entry differs from either parent's.
    '''
    changed = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        changed |= (set(mp1) | set(mp2)) - set(mc)
        changed.update(f for f in mc
                       if mc[f] != mp1.get(f, None)
                       or mc[f] != mp2.get(f, None))
    return changed
289
289
290 # Convert src parents to dst parents
290 # Convert src parents to dst parents
291 def _convertparents(ctx, revmap):
291 def _convertparents(ctx, revmap):
292 parents = []
292 parents = []
293 for p in ctx.parents():
293 for p in ctx.parents():
294 parents.append(revmap[p.node()])
294 parents.append(revmap[p.node()])
295 while len(parents) < 2:
295 while len(parents) < 2:
296 parents.append(node.nullid)
296 parents.append(node.nullid)
297 return parents
297 return parents
298
298
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    '''Build a memfilectx for non-largefile f in ctx.

    Returns None when f is not present (removed or renamed).  Content of
    .hgtags is rewritten through revmap so tags point at converted nodes.
    '''
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        # removed/renamed in this revision
        return None
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        data = _converttags(repo.ui, revmap, data)
    flags = fctx.flags()
    return context.memfilectx(repo, f, data, 'l' in flags, 'x' in flags,
                              renamed)
314
314
315 # Remap tag data using a revision map
315 # Remap tag data using a revision map
316 def _converttags(ui, revmap, data):
316 def _converttags(ui, revmap, data):
317 newdata = []
317 newdata = []
318 for line in data.splitlines():
318 for line in data.splitlines():
319 try:
319 try:
320 id, name = line.split(' ', 1)
320 id, name = line.split(' ', 1)
321 except ValueError:
321 except ValueError:
322 ui.warn(_('skipping incorrectly formatted tag %s\n')
322 ui.warn(_('skipping incorrectly formatted tag %s\n')
323 % line)
323 % line)
324 continue
324 continue
325 try:
325 try:
326 newid = node.bin(id)
326 newid = node.bin(id)
327 except TypeError:
327 except TypeError:
328 ui.warn(_('skipping incorrectly formatted id %s\n')
328 ui.warn(_('skipping incorrectly formatted id %s\n')
329 % id)
329 % id)
330 continue
330 continue
331 try:
331 try:
332 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
332 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
333 name))
333 name))
334 except KeyError:
334 except KeyError:
335 ui.warn(_('no mapping for id %s\n') % id)
335 ui.warn(_('no mapping for id %s\n') % id)
336 continue
336 continue
337 return ''.join(newdata)
337 return ''.join(newdata)
338
338
339 def _islfile(file, ctx, matcher, size):
339 def _islfile(file, ctx, matcher, size):
340 '''Return true if file should be considered a largefile, i.e.
340 '''Return true if file should be considered a largefile, i.e.
341 matcher matches it or it is larger than size.'''
341 matcher matches it or it is larger than size.'''
342 # never store special .hg* files as largefiles
342 # never store special .hg* files as largefiles
343 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
343 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
344 return False
344 return False
345 if matcher and matcher(file):
345 if matcher and matcher(file):
346 return True
346 return True
347 try:
347 try:
348 return ctx.filectx(file).size() >= size * 1024 * 1024
348 return ctx.filectx(file).size() >= size * 1024 * 1024
349 except error.LookupError:
349 except error.LookupError:
350 return False
350 return False
351
351
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store

    files is a list of content hashes; hashes already present in the
    remote store (per store.exists) are skipped.  Aborts if a needed
    largefile is missing from the local store.
    '''

    if not files:
        return

    store = storefactory._openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    # use a list comprehension, not filter(): on Python 3 filter()
    # returns a lazy iterator, which would break the len(files) calls
    # below
    files = [h for h in files if not retval[h]]
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    for hash in files:
        ui.progress(_('uploading largefiles'), at, unit=_('files'),
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise error.Abort(_('largefile %s missing from store'
                                ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)
377
377
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    revs = repo.revs('all()') if all else ['.']
    store = storefactory._openstore(repo)
    return store.verify(revs, contents=contents)
391
391
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)

    toget = []
    for lfile in lfiles:
        try:
            wantedhash = repo[node][lfutil.standin(lfile)].data().strip()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # node must be None and standin wasn't found in wctx
            continue
        if not lfutil.findfile(repo, wantedhash):
            toget.append((lfile, wantedhash))

    if not toget:
        return ([], [])
    store = storefactory._openstore(repo)
    return store.get(toget)
420
420
def downloadlfiles(ui, repo, rev=None):
    '''Populate the local largefile cache for the given revisions
    (all revisions when rev is None).

    Returns a (cached, missing) pair of counts.
    '''
    matchfn = scmutil.match(repo[None],
                            [repo.wjoin(lfutil.shortname)], {})
    def prepare(ctx, fns):
        pass
    numcached = 0
    nummissing = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        walkopts = {'rev': rev}
        for ctx in cmdutil.walkchangerevs(repo, matchfn, walkopts, prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            numcached += len(success)
            nummissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % numcached)
    if nummissing > 0:
        ui.status(_("%d largefiles failed to download\n") % nummissing)
    return numcached, nummissing
438
438
def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # consider both repo-tracked largefiles and ones known only to
        # the largefiles dirstate
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        # update: lfile -> expected hash, filled in the first pass below,
        # consumed in the second pass after the cache is populated
        update = {}
        updated, removed = 0, 0
        wvfs = repo.wvfs
        for lfile in lfiles:
            rellfile = lfile
            rellfileorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(rellfile)),
                start=repo.root)
            relstandin = lfutil.standin(lfile)
            relstandinorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(relstandin)),
                start=repo.root)
            if wvfs.exists(relstandin):
                # a standin .orig backup exists: preserve the largefile's
                # current content as the largefile's .orig instead
                if (wvfs.exists(relstandinorig) and
                    wvfs.exists(rellfile)):
                    shutil.copyfile(wvfs.join(rellfile),
                                    wvfs.join(rellfileorig))
                    wvfs.unlinkpath(relstandinorig)
                expecthash = lfutil.readstandin(repo, lfile)
                if expecthash != '':
                    if lfile not in repo[None]: # not switched to normal file
                        wvfs.unlinkpath(rellfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (wvfs.exists(rellfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    wvfs.unlinkpath(rellfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            rellfile = lfile
            relstandin = lfutil.standin(lfile)
            if wvfs.exists(relstandin):
                # propagate the standin's permission bits to the largefile
                mode = wvfs.stat(relstandin).st_mode
                if mode != wvfs.stat(rellfile).st_mode:
                    wvfs.chmod(rellfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
530
530
@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + commands.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Record the pull source so the store machinery knows where to fetch from.
    repo.lfpullsource = source

    # Resolve the user-supplied revset(s); refuse to run without any, since
    # pulling "no revisions" would silently do nothing.
    revspecs = opts.get('rev', [])
    if not revspecs:
        raise error.Abort(_('no revisions specified'))
    resolved = scmutil.revrange(repo, revspecs)

    # Fetch the largefiles for each revision in turn, tallying how many
    # entries were actually brought into the local cache.
    totalcached = 0
    for rev in resolved:
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        cached, missing = cachelfiles(ui, repo, rev)
        totalcached += len(cached)
    ui.status(_("%d largefiles cached\n") % totalcached)
General Comments 0
You need to be logged in to leave comments. Login now