##// END OF EJS Templates
spelling: update
timeless@mozdev.org -
r17526:1572fd8a default
parent child Browse files
Show More
@@ -1,549 +1,549 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import os
11 import os
12 import shutil
12 import shutil
13
13
14 from mercurial import util, match as match_, hg, node, context, error, \
14 from mercurial import util, match as match_, hg, node, context, error, \
15 cmdutil, scmutil
15 cmdutil, scmutil
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.lock import release
17 from mercurial.lock import release
18
18
19 import lfutil
19 import lfutil
20 import basestore
20 import basestore
21
21
22 # -- Commands ----------------------------------------------------------
22 # -- Commands ----------------------------------------------------------
23
23
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        # size is only needed (and only defined) in the to-largefile
        # direction; the threshold is in megabytes
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise util.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise util.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Lock destination to prevent modification while it is converted to.
        # Don't need to lock src because we are just reading from its history
        # which can't change.
        dstwlock = rdst.wlock()
        dstlock = rdst.lock()

        # Get a list of all changesets in the source.  The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        # revmap translates source changeset nodes to their converted
        # counterparts; seeded with nullid so parentless roots map cleanly
        revmap = {node.nullid: node.nullid}
        if tolfile:
            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = match_.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            # lfiletohash caches the content hash of the last written
            # version of each largefile, to skip rewriting unchanged data
            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # drop the largefile store directory left in the destination's
            # working area by the conversion; it is not part of the result
            if os.path.exists(rdst.wjoin(lfutil.shortname)):
                shutil.rmtree(rdst.wjoin(lfutil.shortname))

            # remove the plain working copies of converted largefiles and
            # any directories emptied by doing so
            for f in lfiletohash.keys():
                if os.path.isfile(rdst.wjoin(f)):
                    os.unlink(rdst.wjoin(f))
                try:
                    os.removedirs(os.path.dirname(rdst.wjoin(f)))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            # --to-normal direction: replace standins with real contents
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _addchangeset(ui, rsrc, rdst, ctx, revmap)

            ui.progress(_('converting revisions'), None)
        success = True
    finally:
        rdst.dirstate.clear()
        release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
122
122
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    '''Convert one changeset ctx from the largefiles repository rsrc into
    a normal changeset in rdst (the --to-normal direction), replacing each
    standin with the real largefile contents read from rsrc's store.

    Records the resulting node in revmap (src node -> dst node).'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    def getfilectx(repo, memctx, f):
        # memctx callback: supply the converted contents for file f
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError
            renamed = fctx.renamed()
            if renamed:
                # map the copy source from standin name back to real name
                renamed = lfutil.splitstandin(renamed[0])

            # the standin's content is the largefile's hash; use it to
            # locate the real data in the source repository's store
            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)

    # translate standin paths back to the real largefile names
    dstfiles = []
    for file in files:
        if lfutil.isstandin(file):
            dstfiles.append(lfutil.splitstandin(file))
        else:
            dstfiles.append(file)
    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
166
166
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Convert one source changeset ctx into rdst, turning files that
    qualify (matcher match, size threshold, or copy of a largefile) into
    largefiles tracked through standins.

    Mutates the shared classification caches lfiles/normalfiles, the
    revmap (src node -> dst node) and lfiletohash (largefile name ->
    hash of the last version written to the working directory).'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        # classify files we have not seen in any earlier changeset
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    # symlinks cannot be largefiles
                    if renamedlfile:
                        raise util.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise util.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                fullpath = rdst.wjoin(f)
                util.makedirs(os.path.dirname(fullpath))
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    # fix: fd must be pre-bound so the finally clause does
                    # not hit an unbound local if open() itself raises
                    fd = None
                    try:
                        fd = open(fullpath, 'wb')
                        fd.write(ctx[f].data())
                    finally:
                        if fd:
                            fd.close()
                    executable = 'x' in ctx[f].flags()
                    os.chmod(fullpath, lfutil.getmode(executable))
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # memctx callback: supply file contents for the converted changeset
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            # the standin's content is the largefile's hash plus newline
            return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
                fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
251
251
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    '''Commit the converted changeset into rdst and record the mapping
    from the source node to the freshly created destination node.'''
    memctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                            getfilectx, ctx.user(), ctx.date(), ctx.extra())
    newnode = rdst.commitctx(memctx)
    rdst.setparents(newnode)
    revmap[ctx.node()] = rdst.changelog.tip()
258
258
def _getchangedfiles(ctx, parents):
    '''Return the set of file names affected by changeset ctx.

    For merges (two real parents), this also includes files that were
    present in a parent but dropped from the merge result, and files
    whose merged version differs from either parent's version.'''
    changed = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        # present in a parent but gone from the merge result
        changed |= (set(mp1) | set(mp2)) - set(mc)
        # merged version differs from at least one parent's version
        changed.update(f for f in mc
                       if mc[f] != mp1.get(f) or mc[f] != mp2.get(f))
    return changed
271
271
def _convertparents(ctx, revmap):
    '''Return ctx's parent nodes translated through revmap, padded with
    nullid so the result always contains exactly two entries.'''
    translated = [revmap[p.node()] for p in ctx.parents()]
    translated.extend([node.nullid] * (2 - len(translated)))
    return translated
280
280
def _getnormalcontext(ui, ctx, f, revmap):
    '''Build a memfilectx for a plain (non-largefile) file f of ctx.

    Raises IOError when f is absent from ctx (removed or renamed),
    which is how memctx callbacks signal deletion. The contents of
    .hgtags get their changeset ids remapped through revmap.'''
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        # memctx interprets IOError from the callback as "file deleted"
        raise IOError
    copysource = fctx.renamed()
    copysource = copysource[0] if copysource else copysource

    data = fctx.data()
    if f == '.hgtags':
        data = _converttags(ui, revmap, data)
    flags = fctx.flags()
    return context.memfilectx(f, data, 'l' in flags, 'x' in flags,
                              copysource)
296
296
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    '''Rewrite .hgtags contents, mapping each tagged changeset id through
    revmap. Lines that cannot be parsed, decoded, or mapped are skipped
    with a warning on ui. Returns the rewritten tag data as a string.'''
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(' ', 1)
        except ValueError:
            # fix: interpolate outside _() so the literal msgid is what
            # gets looked up in the translation catalog
            ui.warn(_('skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            newid = node.bin(id)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            newdata.append('%s %s\n' % (node.hex(revmap[newid]),
                name))
        except KeyError:
            ui.warn(_('no mapping for id %s\n') % id)
            continue
    return ''.join(newdata)
320
320
321 def _islfile(file, ctx, matcher, size):
321 def _islfile(file, ctx, matcher, size):
322 '''Return true if file should be considered a largefile, i.e.
322 '''Return true if file should be considered a largefile, i.e.
323 matcher matches it or it is larger than size.'''
323 matcher matches it or it is larger than size.'''
324 # never store special .hg* files as largefiles
324 # never store special .hg* files as largefiles
325 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
325 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
326 return False
326 return False
327 if matcher and matcher(file):
327 if matcher and matcher(file):
328 return True
328 return True
329 try:
329 try:
330 return ctx.filectx(file).size() >= size * 1024 * 1024
330 return ctx.filectx(file).size() >= size * 1024 * 1024
331 except error.LookupError:
331 except error.LookupError:
332 return False
332 return False
333
333
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    # ask the store which hashes it already has, in one round trip
    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    present = store.exists(files)
    missing = [fhash for fhash in files if not present[fhash]]
    ui.debug("%d largefiles need to be uploaded\n" % len(missing))

    for at, fhash in enumerate(missing):
        ui.progress(_('uploading largefiles'), at, unit='largefile',
                    total=len(missing))
        source = lfutil.findfile(rsrc, fhash)
        if not source:
            raise util.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % fhash)
        # XXX check for errors here
        store.put(source, fhash)
    ui.progress(_('uploading largefiles'), None)
359
359
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every big file revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each big file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    # Pass a list to the function rather than an iterator because we know a
    # list will work.
    revs = range(len(repo)) if all else ['.']

    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)
375
375
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        # restrict to the caller-supplied subset of largefiles
        lfiles = set(lfiles) & set(filelist)
    toget = []

    for lfile in lfiles:
        # If we are mid-merge, then we have to trust the standin that is in the
        # working copy to have the correct hashvalue. This is because the
        # original hg.merge() already updated the standin as part of the normal
        # merge process -- we just have to update the largefile to match.
        if (getattr(repo, "_ismerging", False) and
                os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
            expectedhash = lfutil.readstandin(repo, lfile)
        else:
            # otherwise the standin committed at 'node' is authoritative
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()

        # if it exists and its hash matches, it might have been locally
        # modified before updating and the user chose 'local'. in this case,
        # it will not be in any store, so don't look for it.
        if ((not os.path.exists(repo.wjoin(lfile)) or
                expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
                not lfutil.findfile(repo, expectedhash)):
            toget.append((lfile, expectedhash))

    if toget:
        store = basestore._openstore(repo)
        # store.get returns the (cached, missing) pair directly
        ret = store.get(toget)
        return ret

    return ([], [])
413
413
def downloadlfiles(ui, repo, rev=None):
    '''Fetch into the local cache every largefile referenced by the
    revisions selected by rev (all revisions when rev is None).

    Returns the pair (number cached, number that failed to download)
    and reports both totals on the ui.'''
    matchfn = scmutil.match(repo[None],
                            [repo.wjoin(lfutil.shortname)], {})
    def prepare(ctx, fns):
        # walkchangerevs requires a prepare callback; nothing to do here
        pass
    numcached = 0
    nummissing = 0
    revwalk = cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev}, prepare)
    for ctx in revwalk:
        cached, missing = cachelfiles(ui, repo, ctx.node())
        numcached += len(cached)
        nummissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % numcached)
    if nummissing > 0:
        ui.status(_("%d largefiles failed to download\n") % nummissing)
    return numcached, nummissing
430
430
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    '''Synchronize the working directory's largefiles with their standins,
    optionally restricted to filelist.

    With printmessage, announces the operation and prints a final
    updated/removed summary on the ui.'''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # union of largefiles in the repo and those the lfdirstate knows of
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        printed = False
        if printmessage and lfiles:
            ui.status(_('getting changed largefiles\n'))
            printed = True
        # make sure every needed largefile is present in the cache first
        cachelfiles(ui, repo, '.', lfiles)

        updated, removed = 0, 0
        for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
            # increment the appropriate counter according to _updatelfile's
            # return value
            # NOTE(review): i may be None (cache miss); the and/or dance
            # relies on Python 2 ordering so None contributes 0 to both
            # counters -- do not simplify to plain if i > 0 / i < 0
            updated += i > 0 and i or 0
            removed -= i < 0 and i or 0
        if printmessage and (removed or updated) and not printed:
            # something changed even though nothing was requested up front
            ui.status(_('getting changed largefiles\n'))
            printed = True

        lfdirstate.write()
        if printed and printmessage:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
462
462
def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    # ret: the status code described in the docstring (0 = unchanged).
    ret = 0
    # Absolute working-directory paths of the largefile and its standin.
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        # A leftover standin backup ('.orig') signals that a merge/revert
        # produced one; mirror that by backing up the largefile itself
        # before we potentially overwrite it below.
        # NOTE(review): this copies abslfile unconditionally — presumably
        # abslfile exists whenever the '.orig' standin does; confirm.
        if os.path.exists(absstandin + '.orig'):
            shutil.copyfile(abslfile, abslfile + '.orig')
        # Hash recorded in the standin = the content the largefile should
        # have. Empty hash means "no expectation"; skip the content sync.
        expecthash = lfutil.readstandin(repo, lfile)
        if (expecthash != '' and
            (not os.path.exists(abslfile) or
             expecthash != lfutil.hashfile(abslfile))):
            # Largefile is absent or stale: fetch the expected content
            # from the cache into the working directory.
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                # use normallookup() to allocate entry in largefiles dirstate,
                # because lack of it misleads lfilesrepo.status() into
                # recognition that such cache missing files are REMOVED.
                lfdirstate.normallookup(lfile)
                return None # don't try to set the mode
            else:
                # Synchronize largefile dirstate to the last modified time of
                # the file
                lfdirstate.normal(lfile)
            ret = 1
        # Propagate the standin's permission bits (e.g. exec bit) onto the
        # largefile; a mode change alone still counts as "updated".
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    else:
        # Remove lfiles for which the standin is deleted, unless the
        # lfile is added to the repository again. This happens when a
        # largefile is converted back to a normal file: the standin
        # disappears, but a new (normal) file appears as the lfile.
        if os.path.exists(abslfile) and lfile not in repo[None]:
            util.unlinkpath(abslfile)
            ret = -1
    # Second phase: copy the standin's dirstate status letter
    # (n/r/a/?) onto the largefile's entry in the lfdirstate.
    state = repo.dirstate[lfutil.standin(lfile)]
    if state == 'n':
        # When rebasing, we need to synchronize the standin and the largefile,
        # because otherwise the largefile will get reverted. But for commit's
        # sake, we have to mark the file as unclean.
        if getattr(repo, "_isrebasing", False):
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
    return ret
519
519
def catlfile(repo, lfile, rev, filename):
    '''Write the content of largefile lfile at revision rev to filename.

    The output file object is created with cmdutil.makefileobj, so
    filename follows that command's conventions (e.g. format specifiers).
    If the largefile is not in the user cache, it is first fetched from
    the store; util.Abort is raised when it cannot be downloaded.

    Returns 0 on success.'''
    hash = lfutil.readstandin(repo, lfile, rev)
    if not lfutil.inusercache(repo.ui, hash):
        store = basestore._openstore(repo)
        # store.get() returns (successes, misses); we asked for exactly
        # one file, so anything other than one success is a failure.
        success, missing = store.get([(lfile, hash)])
        if len(success) != 1:
            raise util.Abort(
                _('largefile %s is not in cache and could not be downloaded')
                % lfile)
    path = lfutil.usercachepath(repo.ui, hash)
    fpout = cmdutil.makefileobj(repo, filename)
    try:
        # try/finally (rather than with) keeps compatibility with the
        # old-Python style of this file while ensuring both handles are
        # closed even if open()/read()/write() raises.
        fpin = open(path, "rb")
        try:
            fpout.write(fpin.read())
        finally:
            fpin.close()
    finally:
        fpout.close()
    return 0
536
536
# -- hg commands declarations ------------------------------------------------

# Command table picked up by Mercurial's extension loader: maps the
# command name to a (function, options, synopsis) tuple.  Each option
# entry is (short flag, long flag, default value, help text[, value
# placeholder]).
cmdtable = {
    'lfconvert': (lfconvert,
                  [('s', 'size', '',
                    _('minimum size (MB) for files to be converted '
                      'as largefiles'),
                    'SIZE'),
                   ('', 'to-normal', False,
                    _('convert from a largefiles repo to a normal repo')),
                   ],
                  _('hg lfconvert SOURCE DEST [FILE ...]')),
    }
General Comments 0
You need to be logged in to leave comments. Login now