##// END OF EJS Templates
largefiles: don't reference uninitialized variable (issue3092)
Levi Bard -
r15808:62098aeb default
parent child Browse files
Show More
@@ -1,485 +1,487
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import os
11 import os
12 import shutil
12 import shutil
13
13
14 from mercurial import util, match as match_, hg, node, context, error
14 from mercurial import util, match as match_, hg, node, context, error
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 import lfutil
17 import lfutil
18 import basestore
18 import basestore
19
19
20 # -- Commands ----------------------------------------------------------
20 # -- Commands ----------------------------------------------------------
21
21
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        # size is only needed (and only defined) on the to-largefiles path
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise util.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise util.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    # Initialize before the try block: if rdst.lock() itself raises, the
    # finally clause would otherwise hit a NameError on dst_lock and mask
    # the original error.
    dst_lock = None
    try:
        # Lock destination to prevent modification while it is converted to.
        # Don't need to lock src because we are just reading from its history
        # which can't change.
        dst_lock = rdst.lock()

        # Get a list of all changesets in the source.  The easy way to do
        # this is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = match_.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # The standin directory in the working copy is a conversion
            # artifact; remove it along with any converted working copies.
            if os.path.exists(rdst.wjoin(lfutil.shortname)):
                shutil.rmtree(rdst.wjoin(lfutil.shortname))

            for f in lfiletohash.keys():
                if os.path.isfile(rdst.wjoin(f)):
                    os.unlink(rdst.wjoin(f))
                try:
                    os.removedirs(os.path.dirname(rdst.wjoin(f)))
                except OSError:
                    # directory not empty (or already gone) -- leave it
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _addchangeset(ui, rsrc, rdst, ctx, revmap)

            ui.progress(_('converting revisions'), None)
        success = True
    finally:
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
        if dst_lock is not None:
            dst_lock.release()
117
117
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    '''Recommit changeset ctx into rdst, converting standins back to
    normal files.  Records the new node in revmap.'''
    # Map the source parents to their already-converted destination nodes,
    # padding with nullid so there are always exactly two parents.
    parents = [revmap[p.node()] for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(node.nullid)

    # Build the set of files touched by this changeset.  For merges, also
    # pick up files that differ between the manifests of the parents.
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    def getfilectx(repo, memctx, f):
        # Produce the file content for f in the destination commit.
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            # The standin stores the largefile's hash; resolve it to the
            # cached content.
            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                # Rewrite tag targets to the converted node ids.
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                                                name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)

    # Standins become their plain filenames in the destination.
    dstfiles = [lfutil.splitstandin(f) if lfutil.isstandin(f) else f
                for f in files]
    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
193
193
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Recommit changeset ctx into rdst, converting files matched by
    matcher (or bigger than size MB) into largefile standins.  Updates
    lfiles/normalfiles/lfiletohash in place and records the new node in
    revmap.'''
    # Convert src parents to dst parents
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files; for merges, also include files
    # that differ between the two parent manifests.
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the lfileness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise util.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    # symlinks are never tracked as largefiles
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise util.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                fullpath = rdst.wjoin(f)
                util.makedirs(os.path.dirname(fullpath))
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    # Initialize fd before the try: if open() raises, the
                    # finally clause would otherwise reference an unbound
                    # (or stale, from a previous iteration) fd.
                    fd = None
                    try:
                        fd = open(fullpath, 'wb')
                        fd.write(ctx[f].data())
                    finally:
                        if fd:
                            fd.close()
                    executable = 'x' in ctx[f].flags()
                    os.chmod(fullpath, lfutil.getmode(executable))
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # Produce the file content for f in the destination commit.
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            # The standin's content is the largefile's hash.
            return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
                fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]

            data = fctx.data()
            if f == '.hgtags':
                # Rewrite tag targets to the converted node ids.
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                        name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)

    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
309
311
310 def _islfile(file, ctx, matcher, size):
312 def _islfile(file, ctx, matcher, size):
311 '''Return true if file should be considered a largefile, i.e.
313 '''Return true if file should be considered a largefile, i.e.
312 matcher matches it or it is larger than size.'''
314 matcher matches it or it is larger than size.'''
313 # never store special .hg* files as largefiles
315 # never store special .hg* files as largefiles
314 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
316 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
315 return False
317 return False
316 if matcher and matcher(file):
318 if matcher and matcher(file):
317 return True
319 return True
318 try:
320 try:
319 return ctx.filectx(file).size() >= size * 1024 * 1024
321 return ctx.filectx(file).size() >= size * 1024 * 1024
320 except error.LookupError:
322 except error.LookupError:
321 return False
323 return False
322
324
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    # Only upload hashes the store does not already have.
    files = filter(lambda h: not store.exists(h), files)
    total = len(files)
    for at, hash in enumerate(files):
        ui.progress(_('uploading largefiles'), at, unit='largefile',
                    total=total)
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise util.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
    ui.progress(_('uploading largefiles'), None)
344
346
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every big file revision in the current changeset
    exists in the central store.  With --contents, also verify that
    the contents of each big file revision are correct (SHA-1 hash
    matches the revision ID).  With --all, check every changeset in
    this repository.'''
    if all:
        # Pass a list to the function rather than an iterator because we know
        # a list will work.
        revs = range(len(repo))
    else:
        # only the working directory's parent
        revs = ['.']

    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)
360
362
def cachelfiles(ui, repo, node):
    '''cachelfiles ensures that all largefiles needed by the specified
    revision are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    toget = []
    for lfile in lfutil.listlfiles(repo, node):
        expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        wfile = repo.wjoin(lfile)
        # if it exists and its hash matches, it might have been locally
        # modified before updating and the user chose 'local'. in this case,
        # it will not be in any store, so don't look for it.
        uptodate = (os.path.exists(wfile) and
                    expectedhash == lfutil.hashfile(wfile))
        if not uptodate and not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if toget:
        store = basestore._openstore(repo)
        return store.get(toget)

    # nothing to fetch: no files cached, none missing
    return ([], [])
387
389
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    '''Bring working-copy largefiles in sync with their standins,
    optionally restricted to filelist.  Holds the wlock throughout.'''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        printed = False
        if printmessage and lfiles:
            ui.status(_('getting changed largefiles\n'))
            printed = True
            cachelfiles(ui, repo, '.')

        updated, removed = 0, 0
        for f in lfiles:
            # _updatelfile returns 1 (modified), -1 (removed), 0 (unchanged)
            # or None (largefile missing from the cache).
            res = _updatelfile(repo, lfdirstate, f)
            if res is None:
                continue
            if res > 0:
                updated += res
            elif res < 0:
                removed -= res
            if printmessage and (removed or updated) and not printed:
                ui.status(_('getting changed largefiles\n'))
                printed = True

        lfdirstate.write()
        if printed and printmessage:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
419
421
def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    result = 0
    lfpath = repo.wjoin(lfile)
    standinpath = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(standinpath):
        # A backup of the standin implies the largefile needs one as well.
        if os.path.exists(standinpath + '.orig'):
            shutil.copyfile(lfpath, lfpath + '.orig')
        wantedhash = lfutil.readstandin(repo, lfile)
        if (wantedhash != '' and
            (not os.path.exists(lfpath) or
             wantedhash != lfutil.hashfile(lfpath))):
            if not lfutil.copyfromcache(repo, wantedhash, lfile):
                # use normallookup() to allocate entry in largefiles dirstate,
                # because lack of it misleads lfiles_repo.status() into
                # recognition that such cache missing files are REMOVED.
                lfdirstate.normallookup(lfile)
                return None # don't try to set the mode
            result = 1
        # Mirror the standin's permission bits onto the largefile.
        wantedmode = os.stat(standinpath).st_mode
        if wantedmode != os.stat(lfpath).st_mode:
            os.chmod(lfpath, wantedmode)
            result = 1
    else:
        # Remove lfiles for which the standin is deleted, unless the
        # lfile is added to the repository again. This happens when a
        # largefile is converted back to a normal file: the standin
        # disappears, but a new (normal) file appears as the lfile.
        if os.path.exists(lfpath) and lfile not in repo[None]:
            os.unlink(lfpath)
            result = -1
    # Copy the standin's dirstate status into the largefiles dirstate.
    state = repo.dirstate[lfutil.standin(lfile)]
    if state == 'n':
        # When rebasing, we need to synchronize the standin and the largefile,
        # because otherwise the largefile will get reverted. But for commit's
        # sake, we have to mark the file as unclean.
        if getattr(repo, "_isrebasing", False):
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
    return result
472
474
473 # -- hg commands declarations ------------------------------------------------
475 # -- hg commands declarations ------------------------------------------------
474
476
# Command table for the lfconvert command: name -> (function, options, synopsis).
cmdtable = {
    'lfconvert': (lfconvert,
                  [('s', 'size', '',
                    _('minimum size (MB) for files to be converted '
                      'as largefiles'),
                    'SIZE'),
                   ('', 'to-normal', False,
                    _('convert from a largefiles repo to a normal repo')),
                   ],
                  _('hg lfconvert SOURCE DEST [FILE ...]')),
    }
General Comments 0
You need to be logged in to leave comments. Login now