# largefiles: use wlock for lfconvert (issue3444)
# Mads Kiilerich - r16717:1eede2ea (stable)
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''High-level command function for lfconvert, plus the cmdtable.'''

import os
import shutil

from mercurial import util, match as match_, hg, node, context, error, cmdutil
from mercurial.i18n import _
from mercurial.lock import release

import lfutil
import basestore

# -- Commands ----------------------------------------------------------
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise util.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise util.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Lock destination to prevent modification while it is converted to.
        # Don't need to lock src because we are just reading from its history
        # which can't change.  wlock is taken before lock, per the required
        # Mercurial lock ordering (issue3444).
        dstwlock = rdst.wlock()
        dstlock = rdst.lock()

        # Get a list of all changesets in the source.  The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = match_.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            if os.path.exists(rdst.wjoin(lfutil.shortname)):
                shutil.rmtree(rdst.wjoin(lfutil.shortname))

            for f in lfiletohash.keys():
                if os.path.isfile(rdst.wjoin(f)):
                    os.unlink(rdst.wjoin(f))
                try:
                    os.removedirs(os.path.dirname(rdst.wjoin(f)))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _addchangeset(ui, rsrc, rdst, ctx, revmap)

            ui.progress(_('converting revisions'), None)
        success = True
    finally:
        # The converted dirstate is meaningless for the scratch destination;
        # clear it before releasing locks (lock before wlock).
        rdst.dirstate.clear()
        release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    '''Convert one source changeset back to a normal (non-largefiles)
    changeset in the destination, recording the new node in revmap.'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            # The standin's content is the largefile's hash; fetch the real
            # contents from the source's largefile store.
            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)

    # Map standin names back to the plain filenames they stand in for.
    dstfiles = []
    for file in files:
        if lfutil.isstandin(file):
            dstfiles.append(lfutil.splitstandin(file))
        else:
            dstfiles.append(file)
    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
161
165
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Convert one source changeset into the destination, turning files that
    match the matcher or exceed size into largefiles with standins.'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the lfileness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise util.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    # symlinks are never tracked as largefiles
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise util.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                fullpath = rdst.wjoin(f)
                util.makedirs(os.path.dirname(fullpath))
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    # initialize fd so the finally clause cannot hit a
                    # NameError if open() itself raises
                    fd = None
                    try:
                        fd = open(fullpath, 'wb')
                        fd.write(ctx[f].data())
                    finally:
                        if fd:
                            fd.close()
                    executable = 'x' in ctx[f].flags()
                    os.chmod(fullpath, lfutil.getmode(executable))
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
                fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
246
250
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    '''Commit a memctx mirroring ctx into rdst and record the mapping from
    the source node to the new destination tip in revmap.'''
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
253
257
# Generate list of changed files
def _getchangedfiles(ctx, parents):
    files = set(ctx.files())
    if node.nullid not in parents:
        # merge changeset: also include files that differ from either parent
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)
    return files
266
270
# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    # pad with null parents so callers always get exactly two entries
    while len(parents) < 2:
        parents.append(node.nullid)
    return parents
275
279
# Get memfilectx for a normal file
def _getnormalcontext(ui, ctx, f, revmap):
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        # removed/renamed in this changeset
        raise IOError()
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        # tag file references old nodes; rewrite them through revmap
        data = _converttags(ui, revmap, data)
    return context.memfilectx(f, data, 'l' in fctx.flags(),
                              'x' in fctx.flags(), renamed)
291
295
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(' ', 1)
        except ValueError:
            # interpolate outside _() so the message catalog key is stable
            ui.warn(_('skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            newid = node.bin(id)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            newdata.append('%s %s\n' % (node.hex(revmap[newid]),
                name))
        except KeyError:
            ui.warn(_('no mapping for id %s\n') % id)
            continue
    return ''.join(newdata)
315
319
def _islfile(file, ctx, matcher, size):
    '''Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size.'''
    # never store special .hg* files as largefiles
    if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
        return False
    if matcher and matcher(file):
        return True
    try:
        # size is in megabytes (see lfconvert --size)
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        return False
328
332
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    at = 0
    # only upload hashes the remote store does not already have
    files = filter(lambda h: not store.exists(h), files)
    for hash in files:
        ui.progress(_('uploading largefiles'), at, unit='largefile',
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise util.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)
350
354
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every big file revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each big file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    if all:
        # Pass a list to the function rather than an iterator because we know a
        # list will work.
        revs = range(len(repo))
    else:
        revs = ['.']

    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)
366
370
def cachelfiles(ui, repo, node):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    toget = []

    for lfile in lfiles:
        # If we are mid-merge, then we have to trust the standin that is in the
        # working copy to have the correct hashvalue. This is because the
        # original hg.merge() already updated the standin as part of the normal
        # merge process -- we just have to update the largefile to match.
        if (getattr(repo, "_ismerging", False) and
             os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
            expectedhash = lfutil.readstandin(repo, lfile)
        else:
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()

        # if it exists and its hash matches, it might have been locally
        # modified before updating and the user chose 'local'. in this case,
        # it will not be in any store, so don't look for it.
        if ((not os.path.exists(repo.wjoin(lfile)) or
             expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
            not lfutil.findfile(repo, expectedhash)):
            toget.append((lfile, expectedhash))

    if toget:
        store = basestore._openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])
402
406
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    '''Update working-copy largefiles (optionally restricted to filelist)
    from the cache, syncing the largefiles dirstate with the standins.'''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        printed = False
        if printmessage and lfiles:
            ui.status(_('getting changed largefiles\n'))
            printed = True
        cachelfiles(ui, repo, '.')

        updated, removed = 0, 0
        for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
            # increment the appropriate counter according to _updatelfile's
            # return value
            updated += i > 0 and i or 0
            removed -= i < 0 and i or 0
            if printmessage and (removed or updated) and not printed:
                ui.status(_('getting changed largefiles\n'))
                printed = True

        lfdirstate.write()
        if printed and printmessage:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
434
438
def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        if os.path.exists(absstandin + '.orig'):
            # keep a backup of the largefile matching the standin's .orig
            shutil.copyfile(abslfile, abslfile + '.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        if (expecthash != '' and
            (not os.path.exists(abslfile) or
             expecthash != lfutil.hashfile(abslfile))):
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                # use normallookup() to allocate entry in largefiles dirstate,
                # because lack of it misleads lfilesrepo.status() into
                # recognition that such cache missing files are REMOVED.
                lfdirstate.normallookup(lfile)
                return None # don't try to set the mode
            ret = 1
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    else:
        # Remove lfiles for which the standin is deleted, unless the
        # lfile is added to the repository again. This happens when a
        # largefile is converted back to a normal file: the standin
        # disappears, but a new (normal) file appears as the lfile.
        if os.path.exists(abslfile) and lfile not in repo[None]:
            util.unlinkpath(abslfile)
            ret = -1
    state = repo.dirstate[lfutil.standin(lfile)]
    if state == 'n':
        # When rebasing, we need to synchronize the standin and the largefile,
        # because otherwise the largefile will get reverted. But for commit's
        # sake, we have to mark the file as unclean.
        if getattr(repo, "_isrebasing", False):
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
    return ret
487
491
def catlfile(repo, lfile, rev, filename):
    '''Write the contents of largefile lfile at rev to filename (or stdout),
    downloading it into the user cache first if necessary. Returns 0.'''
    hash = lfutil.readstandin(repo, lfile, rev)
    if not lfutil.inusercache(repo.ui, hash):
        store = basestore._openstore(repo)
        success, missing = store.get([(lfile, hash)])
        if len(success) != 1:
            raise util.Abort(
                _('largefile %s is not in cache and could not be downloaded')
                % lfile)
    path = lfutil.usercachepath(repo.ui, hash)
    fpout = cmdutil.makefileobj(repo, filename)
    fpin = open(path, "rb")
    fpout.write(fpin.read())
    fpout.close()
    fpin.close()
    return 0
504
508
# -- hg commands declarations ------------------------------------------------

cmdtable = {
    'lfconvert': (lfconvert,
                  [('s', 'size', '',
                    _('minimum size (MB) for files to be converted '
                      'as largefiles'),
                    'SIZE'),
                   ('', 'to-normal', False,
                    _('convert from a largefiles repo to a normal repo')),
                   ],
                  _('hg lfconvert SOURCE DEST [FILE ...]')),
    }