largefiles: correctly handle dirstate status when rebasing...
Na'Tosha Bard
r15793:3ef07ecd default
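The new `getattr(repo, "_isrebasing", False)` check in `_updatelfile()` below relies on a transient flag that the largefiles command wrappers set for the duration of a rebase: while the rebase is in flight, the standin and the largefile must be kept in sync without marking the file clean, otherwise the rebased largefile content would be reverted. A minimal sketch of that flag's life cycle, assuming a wrapped-command style rebase override; the wrapper name and signature here are illustrative, not part of this diff:

```python
# Sketch only: how a rebase wrapper can raise the flag that
# _updatelfile() tests with getattr(repo, "_isrebasing", False).
# "overriderebase" and its argument shape are assumptions for illustration.
def overriderebase(orig, ui, repo, **opts):
    repo._isrebasing = True           # _updatelfile() switches to normallookup()
    try:
        return orig(ui, repo, **opts) # run the wrapped rebase command
    finally:
        repo._isrebasing = False      # never leave the flag set afterwards
```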
@@ -1,479 +1,485 @@ hgext/largefiles/lfcommands.py
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''High-level command function for lfconvert, plus the cmdtable.'''

import os
import shutil

from mercurial import util, match as match_, hg, node, context, error
from mercurial.i18n import _

import lfutil
import basestore

# -- Commands ----------------------------------------------------------

def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise util.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise util.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    try:
        # Lock destination to prevent modification while it is converted to.
        # Don't need to lock src because we are just reading from its history
        # which can't change.
        dst_lock = rdst.lock()

        # Get a list of all changesets in the source. The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = match_.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            if os.path.exists(rdst.wjoin(lfutil.shortname)):
                shutil.rmtree(rdst.wjoin(lfutil.shortname))

            for f in lfiletohash.keys():
                if os.path.isfile(rdst.wjoin(f)):
                    os.unlink(rdst.wjoin(f))
                try:
                    os.removedirs(os.path.dirname(rdst.wjoin(f)))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _addchangeset(ui, rsrc, rdst, ctx, revmap)

            ui.progress(_('converting revisions'), None)
        success = True
    finally:
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
        dst_lock.release()

def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    # Convert src parents to dst parents
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                        name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)

    dstfiles = []
    for file in files:
        if lfutil.isstandin(file):
            dstfiles.append(lfutil.splitstandin(file))
        else:
            dstfiles.append(file)
    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()

def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    # Convert src parents to dst parents
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the lfileness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise util.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                if 'l' in ctx.filectx(f).flags():
                    if renamed and renamed[0] in lfiles:
                        raise util.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                fullpath = rdst.wjoin(f)
                util.makedirs(os.path.dirname(fullpath))
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    fd = None  # initialize so the finally clause can't hit an unbound name
                    try:
                        fd = open(fullpath, 'wb')
                        fd.write(ctx[f].data())
                    finally:
                        if fd:
                            fd.close()
                    executable = 'x' in ctx[f].flags()
                    os.chmod(fullpath, lfutil.getmode(executable))
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
                fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]

            data = fctx.data()
            if f == '.hgtags':
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                        name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)

    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()

def _islfile(file, ctx, matcher, size):
    '''Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size.'''
    # never store special .hg* files as largefiles
    if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
        return False
    if matcher and matcher(file):
        return True
    try:
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        return False

def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    at = 0
    files = filter(lambda h: not store.exists(h), files)
    for hash in files:
        ui.progress(_('uploading largefiles'), at, unit='largefile',
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise util.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)

def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every big file revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each big file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    if all:
        # Pass a list to the function rather than an iterator because we know a
        # list will work.
        revs = range(len(repo))
    else:
        revs = ['.']

    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)

def cachelfiles(ui, repo, node):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    toget = []

    for lfile in lfiles:
        expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        # if it exists and its hash matches, it might have been locally
        # modified before updating and the user chose 'local'. in this case,
        # it will not be in any store, so don't look for it.
        if ((not os.path.exists(repo.wjoin(lfile)) or
             expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
            not lfutil.findfile(repo, expectedhash)):
            toget.append((lfile, expectedhash))

    if toget:
        store = basestore._openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])

def updatelfiles(ui, repo, filelist=None, printmessage=True):
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        printed = False
        if printmessage and lfiles:
            ui.status(_('getting changed largefiles\n'))
            printed = True
            cachelfiles(ui, repo, '.')

        updated, removed = 0, 0
        for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
            # increment the appropriate counter according to _updatelfile's
            # return value
            updated += i > 0 and i or 0
            removed -= i < 0 and i or 0
            if printmessage and (removed or updated) and not printed:
                ui.status(_('getting changed largefiles\n'))
                printed = True

        lfdirstate.write()
        if printed and printmessage:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()

def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        if os.path.exists(absstandin + '.orig'):
            shutil.copyfile(abslfile, abslfile + '.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        if (expecthash != '' and
            (not os.path.exists(abslfile) or
             expecthash != lfutil.hashfile(abslfile))):
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                # use normallookup() to allocate entry in largefiles dirstate,
                # because lack of it misleads lfiles_repo.status() into
                # recognition that such cache missing files are REMOVED.
                lfdirstate.normallookup(lfile)
                return None # don't try to set the mode
            ret = 1
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    else:
        # Remove lfiles for which the standin is deleted, unless the
        # lfile is added to the repository again. This happens when a
        # largefile is converted back to a normal file: the standin
        # disappears, but a new (normal) file appears as the lfile.
        if os.path.exists(abslfile) and lfile not in repo[None]:
            os.unlink(abslfile)
            ret = -1
    state = repo.dirstate[lfutil.standin(lfile)]
    if state == 'n':
-        lfdirstate.normal(lfile)
+        # When rebasing, we need to synchronize the standin and the largefile,
+        # because otherwise the largefile will get reverted. But for commit's
+        # sake, we have to mark the file as unclean.
+        if getattr(repo, "_isrebasing", False):
+            lfdirstate.normallookup(lfile)
+        else:
+            lfdirstate.normal(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
    return ret

# -- hg commands declarations ------------------------------------------------

cmdtable = {
    'lfconvert': (lfconvert,
                  [('s', 'size', '',
                    _('minimum size (MB) for files to be converted '
                      'as largefiles'),
                    'SIZE'),
                   ('', 'to-normal', False,
                    _('convert from a largefiles repo to a normal repo')),
                   ],
                  _('hg lfconvert SOURCE DEST [FILE ...]')),
    }
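The second file's hunk backs this change up: `_updatelfile()` now calls `lfdirstate.normallookup()`, so `largefiles_dirstate` gains a `normallookup` override that funnels the path through `unixpath()` like its sibling methods, keeping dirstate keys slash-separated even on Windows. A self-contained toy showing the same delegation pattern (the `PathNormalizingDict` class and the local `unixpath` helper are illustrative stand-ins, not code from this change):

```python
import os

def unixpath(path):
    # dirstate keys always use '/', even on Windows
    return path.replace(os.sep, '/')

class PathNormalizingDict(dict):
    # same pattern as largefiles_dirstate: every accessor normalizes
    # its key before delegating to the base class
    def __getitem__(self, key):
        return super(PathNormalizingDict, self).__getitem__(unixpath(key))
    def __setitem__(self, key, value):
        super(PathNormalizingDict, self).__setitem__(unixpath(key), value)

d = PathNormalizingDict()
d[os.path.join('foo', 'bar')] = 'hash'
assert d['foo/bar'] == 'hash'  # OS-specific input lands on the unix-style key
```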
@@ -1,460 +1,462 @@ hgext/largefiles/lfutil.py
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16 import tempfile
16 import tempfile
17
17
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
19 from mercurial.i18n import _
19 from mercurial.i18n import _
20
20
21 shortname = '.hglf'
21 shortname = '.hglf'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
25 # -- Portability wrappers ----------------------------------------------
25 # -- Portability wrappers ----------------------------------------------
26
26
27 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
27 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
28 return dirstate.walk(matcher, [], unknown, ignored)
28 return dirstate.walk(matcher, [], unknown, ignored)
29
29
30 def repo_add(repo, list):
30 def repo_add(repo, list):
31 add = repo[None].add
31 add = repo[None].add
32 return add(list)
32 return add(list)
33
33
34 def repo_remove(repo, list, unlink=False):
34 def repo_remove(repo, list, unlink=False):
35 def remove(list, unlink):
35 def remove(list, unlink):
36 wlock = repo.wlock()
36 wlock = repo.wlock()
37 try:
37 try:
38 if unlink:
38 if unlink:
39 for f in list:
39 for f in list:
40 try:
40 try:
41 util.unlinkpath(repo.wjoin(f))
41 util.unlinkpath(repo.wjoin(f))
42 except OSError, inst:
42 except OSError, inst:
43 if inst.errno != errno.ENOENT:
43 if inst.errno != errno.ENOENT:
44 raise
44 raise
45 repo[None].forget(list)
45 repo[None].forget(list)
46 finally:
46 finally:
47 wlock.release()
47 wlock.release()
48 return remove(list, unlink=unlink)
48 return remove(list, unlink=unlink)
49
49
50 def repo_forget(repo, list):
50 def repo_forget(repo, list):
51 forget = repo[None].forget
51 forget = repo[None].forget
52 return forget(list)
52 return forget(list)
53
53
54 def findoutgoing(repo, remote, force):
54 def findoutgoing(repo, remote, force):
55 from mercurial import discovery
55 from mercurial import discovery
56 common, _anyinc, _heads = discovery.findcommonincoming(repo,
56 common, _anyinc, _heads = discovery.findcommonincoming(repo,
57 remote, force=force)
57 remote, force=force)
58 return repo.changelog.findmissing(common)
58 return repo.changelog.findmissing(common)
59
59
60 # -- Private worker functions ------------------------------------------
60 # -- Private worker functions ------------------------------------------
61
61
62 def getminsize(ui, assumelfiles, opt, default=10):
62 def getminsize(ui, assumelfiles, opt, default=10):
63 lfsize = opt
63 lfsize = opt
64 if not lfsize and assumelfiles:
64 if not lfsize and assumelfiles:
65 lfsize = ui.config(longname, 'minsize', default=default)
65 lfsize = ui.config(longname, 'minsize', default=default)
66 if lfsize:
66 if lfsize:
67 try:
67 try:
68 lfsize = float(lfsize)
68 lfsize = float(lfsize)
69 except ValueError:
69 except ValueError:
70 raise util.Abort(_('largefiles: size must be number (not %s)\n')
70 raise util.Abort(_('largefiles: size must be number (not %s)\n')
71 % lfsize)
71 % lfsize)
72 if lfsize is None:
72 if lfsize is None:
73 raise util.Abort(_('minimum size for largefiles must be specified'))
73 raise util.Abort(_('minimum size for largefiles must be specified'))
74 return lfsize
74 return lfsize
75
75
76 def link(src, dest):
76 def link(src, dest):
77 try:
77 try:
78 util.oslink(src, dest)
78 util.oslink(src, dest)
79 except OSError:
79 except OSError:
80 # if hardlinks fail, fallback on atomic copy
80 # if hardlinks fail, fallback on atomic copy
81 dst = util.atomictempfile(dest)
81 dst = util.atomictempfile(dest)
82 for chunk in util.filechunkiter(open(src, 'rb')):
82 for chunk in util.filechunkiter(open(src, 'rb')):
83 dst.write(chunk)
83 dst.write(chunk)
84 dst.close()
84 dst.close()
85 os.chmod(dest, os.stat(src).st_mode)
85 os.chmod(dest, os.stat(src).st_mode)
86
86
87 def usercachepath(ui, hash):
87 def usercachepath(ui, hash):
88 path = ui.configpath(longname, 'usercache', None)
88 path = ui.configpath(longname, 'usercache', None)
89 if path:
89 if path:
90 path = os.path.join(path, hash)
90 path = os.path.join(path, hash)
91 else:
91 else:
92 if os.name == 'nt':
92 if os.name == 'nt':
93 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
93 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
94 if appdata:
94 if appdata:
95 path = os.path.join(appdata, longname, hash)
95 path = os.path.join(appdata, longname, hash)
96 elif platform.system() == 'Darwin':
96 elif platform.system() == 'Darwin':
97 home = os.getenv('HOME')
97 home = os.getenv('HOME')
98 if home:
98 if home:
99 path = os.path.join(home, 'Library', 'Caches',
99 path = os.path.join(home, 'Library', 'Caches',
100 longname, hash)
100 longname, hash)
101 elif os.name == 'posix':
101 elif os.name == 'posix':
102 path = os.getenv('XDG_CACHE_HOME')
102 path = os.getenv('XDG_CACHE_HOME')
103 if path:
103 if path:
104 path = os.path.join(path, longname, hash)
104 path = os.path.join(path, longname, hash)
105 else:
105 else:
106 home = os.getenv('HOME')
106 home = os.getenv('HOME')
107 if home:
107 if home:
108 path = os.path.join(home, '.cache', longname, hash)
108 path = os.path.join(home, '.cache', longname, hash)
109 else:
109 else:
110 raise util.Abort(_('unknown operating system: %s\n') % os.name)
110 raise util.Abort(_('unknown operating system: %s\n') % os.name)
111 return path
111 return path
112
112
113 def inusercache(ui, hash):
113 def inusercache(ui, hash):
114 path = usercachepath(ui, hash)
114 path = usercachepath(ui, hash)
115 return path and os.path.exists(path)
115 return path and os.path.exists(path)
116
116
117 def findfile(repo, hash):
117 def findfile(repo, hash):
118 if instore(repo, hash):
118 if instore(repo, hash):
119 repo.ui.note(_('Found %s in store\n') % hash)
119 repo.ui.note(_('Found %s in store\n') % hash)
120 elif inusercache(repo.ui, hash):
120 elif inusercache(repo.ui, hash):
121 repo.ui.note(_('Found %s in system cache\n') % hash)
121 repo.ui.note(_('Found %s in system cache\n') % hash)
122 path = storepath(repo, hash)
122 path = storepath(repo, hash)
123 util.makedirs(os.path.dirname(path))
123 util.makedirs(os.path.dirname(path))
124 link(usercachepath(repo.ui, hash), path)
124 link(usercachepath(repo.ui, hash), path)
125 else:
125 else:
126 return None
126 return None
127 return storepath(repo, hash)
127 return storepath(repo, hash)
128
128
129 class largefiles_dirstate(dirstate.dirstate):
129 class largefiles_dirstate(dirstate.dirstate):
130 def __getitem__(self, key):
130 def __getitem__(self, key):
131 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
131 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
132 def normal(self, f):
132 def normal(self, f):
133 return super(largefiles_dirstate, self).normal(unixpath(f))
133 return super(largefiles_dirstate, self).normal(unixpath(f))
134 def remove(self, f):
134 def remove(self, f):
135 return super(largefiles_dirstate, self).remove(unixpath(f))
135 return super(largefiles_dirstate, self).remove(unixpath(f))
136 def add(self, f):
136 def add(self, f):
137 return super(largefiles_dirstate, self).add(unixpath(f))
137 return super(largefiles_dirstate, self).add(unixpath(f))
138 def drop(self, f):
138 def drop(self, f):
139 return super(largefiles_dirstate, self).drop(unixpath(f))
139 return super(largefiles_dirstate, self).drop(unixpath(f))
140 def forget(self, f):
140 def forget(self, f):
141 return super(largefiles_dirstate, self).forget(unixpath(f))
141 return super(largefiles_dirstate, self).forget(unixpath(f))
142 def normallookup(self, f):
143 return super(largefiles_dirstate, self).normallookup(unixpath(f))
142
144
143 def openlfdirstate(ui, repo):
145 def openlfdirstate(ui, repo):
144 '''
146 '''
145 Return a dirstate object that tracks largefiles: i.e. its root is
147 Return a dirstate object that tracks largefiles: i.e. its root is
146 the repo root, but it is saved in .hg/largefiles/dirstate.
148 the repo root, but it is saved in .hg/largefiles/dirstate.
147 '''
149 '''
148 admin = repo.join(longname)
150 admin = repo.join(longname)
149 opener = scmutil.opener(admin)
151 opener = scmutil.opener(admin)
150 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
152 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
151 repo.dirstate._validate)
153 repo.dirstate._validate)
152
154
153 # If the largefiles dirstate does not exist, populate and create
155 # If the largefiles dirstate does not exist, populate and create
154 # it. This ensures that we create it on the first meaningful
156 # it. This ensures that we create it on the first meaningful
155 # largefiles operation in a new clone. It also gives us an easy
157 # largefiles operation in a new clone. It also gives us an easy
156 # way to forcibly rebuild largefiles state:
158 # way to forcibly rebuild largefiles state:
157 # rm .hg/largefiles/dirstate && hg status
159 # rm .hg/largefiles/dirstate && hg status
158 # Or even, if things are really messed up:
160 # Or even, if things are really messed up:
159 # rm -rf .hg/largefiles && hg status
161 # rm -rf .hg/largefiles && hg status
160 if not os.path.exists(os.path.join(admin, 'dirstate')):
162 if not os.path.exists(os.path.join(admin, 'dirstate')):
161 util.makedirs(admin)
163 util.makedirs(admin)
162 matcher = getstandinmatcher(repo)
164 matcher = getstandinmatcher(repo)
163 for standin in dirstate_walk(repo.dirstate, matcher):
165 for standin in dirstate_walk(repo.dirstate, matcher):
164 lfile = splitstandin(standin)
166 lfile = splitstandin(standin)
165 hash = readstandin(repo, lfile)
167 hash = readstandin(repo, lfile)
166 lfdirstate.normallookup(lfile)
168 lfdirstate.normallookup(lfile)
167 try:
169 try:
168 if hash == hashfile(repo.wjoin(lfile)):
170 if hash == hashfile(repo.wjoin(lfile)):
169 lfdirstate.normal(lfile)
171 lfdirstate.normal(lfile)
170 except OSError, err:
172 except OSError, err:
171 if err.errno != errno.ENOENT:
173 if err.errno != errno.ENOENT:
172 raise
174 raise
173
175
174 lfdirstate.write()
176 lfdirstate.write()
175
177
176 return lfdirstate
178 return lfdirstate
177
179
178 def lfdirstate_status(lfdirstate, repo, rev):
180 def lfdirstate_status(lfdirstate, repo, rev):
179 wlock = repo.wlock()
181 wlock = repo.wlock()
180 try:
182 try:
181 match = match_.always(repo.root, repo.getcwd())
183 match = match_.always(repo.root, repo.getcwd())
182 s = lfdirstate.status(match, [], False, False, False)
184 s = lfdirstate.status(match, [], False, False, False)
183 unsure, modified, added, removed, missing, unknown, ignored, clean = s
185 unsure, modified, added, removed, missing, unknown, ignored, clean = s
184 for lfile in unsure:
186 for lfile in unsure:
185 if repo[rev][standin(lfile)].data().strip() != \
187 if repo[rev][standin(lfile)].data().strip() != \
186 hashfile(repo.wjoin(lfile)):
188 hashfile(repo.wjoin(lfile)):
187 modified.append(lfile)
189 modified.append(lfile)
188 else:
190 else:
189 clean.append(lfile)
191 clean.append(lfile)
190 lfdirstate.normal(lfile)
192 lfdirstate.normal(lfile)
191 lfdirstate.write()
193 lfdirstate.write()
192 finally:
194 finally:
193 wlock.release()
195 wlock.release()
194 return (modified, added, removed, missing, unknown, ignored, clean)
196 return (modified, added, removed, missing, unknown, ignored, clean)
195
197
196 def listlfiles(repo, rev=None, matcher=None):
198 def listlfiles(repo, rev=None, matcher=None):
197 '''return a list of largefiles in the working copy or the
199 '''return a list of largefiles in the working copy or the
198 specified changeset'''
200 specified changeset'''
199
201
200 if matcher is None:
202 if matcher is None:
201 matcher = getstandinmatcher(repo)
203 matcher = getstandinmatcher(repo)
202
204
203 # ignore unknown files in working directory
205 # ignore unknown files in working directory
204 return [splitstandin(f)
206 return [splitstandin(f)
205 for f in repo[rev].walk(matcher)
207 for f in repo[rev].walk(matcher)
206 if rev is not None or repo.dirstate[f] != '?']
208 if rev is not None or repo.dirstate[f] != '?']
207
209
208 def instore(repo, hash):
210 def instore(repo, hash):
209 return os.path.exists(storepath(repo, hash))
211 return os.path.exists(storepath(repo, hash))
210
212
211 def storepath(repo, hash):
213 def storepath(repo, hash):
212 return repo.join(os.path.join(longname, hash))
214 return repo.join(os.path.join(longname, hash))
213
215
214 def copyfromcache(repo, hash, filename):
216 def copyfromcache(repo, hash, filename):
215 '''Copy the specified largefile from the repo or system cache to
217 '''Copy the specified largefile from the repo or system cache to
216 filename in the repository. Return true on success or false if the
218 filename in the repository. Return true on success or false if the
217 file was not found in either cache (which should not happened:
219 file was not found in either cache (which should not happened:
218 this is meant to be called only after ensuring that the needed
220 this is meant to be called only after ensuring that the needed
219 largefile exists in the cache).'''
221 largefile exists in the cache).'''
220 path = findfile(repo, hash)
222 path = findfile(repo, hash)
221 if path is None:
223 if path is None:
222 return False
224 return False
223 util.makedirs(os.path.dirname(repo.wjoin(filename)))
225 util.makedirs(os.path.dirname(repo.wjoin(filename)))
224 # The write may fail before the file is fully written, but we
226 # The write may fail before the file is fully written, but we
225 # don't use atomic writes in the working copy.
227 # don't use atomic writes in the working copy.
226 shutil.copy(path, repo.wjoin(filename))
228 shutil.copy(path, repo.wjoin(filename))
227 return True
229 return True
228
230
229 def copytostore(repo, rev, file, uploaded=False):
231 def copytostore(repo, rev, file, uploaded=False):
230 hash = readstandin(repo, file)
232 hash = readstandin(repo, file)
231 if instore(repo, hash):
233 if instore(repo, hash):
232 return
234 return
233 copytostoreabsolute(repo, repo.wjoin(file), hash)
235 copytostoreabsolute(repo, repo.wjoin(file), hash)
234
236
235 def copytostoreabsolute(repo, file, hash):
237 def copytostoreabsolute(repo, file, hash):
236 util.makedirs(os.path.dirname(storepath(repo, hash)))
238 util.makedirs(os.path.dirname(storepath(repo, hash)))
237 if inusercache(repo.ui, hash):
239 if inusercache(repo.ui, hash):
238 link(usercachepath(repo.ui, hash), storepath(repo, hash))
240 link(usercachepath(repo.ui, hash), storepath(repo, hash))
239 else:
241 else:
240 dst = util.atomictempfile(storepath(repo, hash))
242 dst = util.atomictempfile(storepath(repo, hash))
241 for chunk in util.filechunkiter(open(file, 'rb')):
243 for chunk in util.filechunkiter(open(file, 'rb')):
242 dst.write(chunk)
244 dst.write(chunk)
243 dst.close()
245 dst.close()
244 util.copymode(file, storepath(repo, hash))
246 util.copymode(file, storepath(repo, hash))
245 linktousercache(repo, hash)
247 linktousercache(repo, hash)
246
248
247 def linktousercache(repo, hash):
249 def linktousercache(repo, hash):
248 path = usercachepath(repo.ui, hash)
250 path = usercachepath(repo.ui, hash)
249 if path:
251 if path:
250 util.makedirs(os.path.dirname(path))
252 util.makedirs(os.path.dirname(path))
251 link(storepath(repo, hash), path)
253 link(storepath(repo, hash), path)
252
254
253 def getstandinmatcher(repo, pats=[], opts={}):
255 def getstandinmatcher(repo, pats=[], opts={}):
254 '''Return a match object that applies pats to the standin directory'''
256 '''Return a match object that applies pats to the standin directory'''
255 standindir = repo.pathto(shortname)
257 standindir = repo.pathto(shortname)
256 if pats:
258 if pats:
257 # patterns supplied: search standin directory relative to current dir
259 # patterns supplied: search standin directory relative to current dir
258 cwd = repo.getcwd()
260 cwd = repo.getcwd()
259 if os.path.isabs(cwd):
261 if os.path.isabs(cwd):
260 # cwd is an absolute path for hg -R <reponame>
262 # cwd is an absolute path for hg -R <reponame>
261 # work relative to the repository root in this case
263 # work relative to the repository root in this case
262 cwd = ''
264 cwd = ''
263 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
265 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
264 elif os.path.isdir(standindir):
266 elif os.path.isdir(standindir):
265 # no patterns: relative to repo root
267 # no patterns: relative to repo root
266 pats = [standindir]
268 pats = [standindir]
267 else:
269 else:
268 # no patterns and no standin dir: return matcher that matches nothing
270 # no patterns and no standin dir: return matcher that matches nothing
269 match = match_.match(repo.root, None, [], exact=True)
271 match = match_.match(repo.root, None, [], exact=True)
270 match.matchfn = lambda f: False
272 match.matchfn = lambda f: False
271 return match
273 return match
272 return getmatcher(repo, pats, opts, showbad=False)
274 return getmatcher(repo, pats, opts, showbad=False)
273
275
274 def getmatcher(repo, pats=[], opts={}, showbad=True):
276 def getmatcher(repo, pats=[], opts={}, showbad=True):
275 '''Wrapper around scmutil.match() that adds showbad: if false,
277 '''Wrapper around scmutil.match() that adds showbad: if false,
276 neuter the match object's bad() method so it does not print any
278 neuter the match object's bad() method so it does not print any
277 warnings about missing files or directories.'''
279 warnings about missing files or directories.'''
278 match = scmutil.match(repo[None], pats, opts)
280 match = scmutil.match(repo[None], pats, opts)
279
281
280 if not showbad:
282 if not showbad:
281 match.bad = lambda f, msg: None
283 match.bad = lambda f, msg: None
282 return match
284 return match
283
285
284 def composestandinmatcher(repo, rmatcher):
286 def composestandinmatcher(repo, rmatcher):
285 '''Return a matcher that accepts standins corresponding to the
287 '''Return a matcher that accepts standins corresponding to the
286 files accepted by rmatcher. Pass the list of files in the matcher
288 files accepted by rmatcher. Pass the list of files in the matcher
287 as the paths specified by the user.'''
289 as the paths specified by the user.'''
288 smatcher = getstandinmatcher(repo, rmatcher.files())
290 smatcher = getstandinmatcher(repo, rmatcher.files())
289 isstandin = smatcher.matchfn
291 isstandin = smatcher.matchfn
290 def composed_matchfn(f):
292 def composed_matchfn(f):
291 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
293 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
292 smatcher.matchfn = composed_matchfn
294 smatcher.matchfn = composed_matchfn
293
295
294 return smatcher
296 return smatcher
295
297
296 def standin(filename):
298 def standin(filename):
297 '''Return the repo-relative path to the standin for the specified big
299 '''Return the repo-relative path to the standin for the specified big
298 file.'''
300 file.'''
299 # Notes:
301 # Notes:
300 # 1) Most callers want an absolute path, but _create_standin() needs
302 # 1) Most callers want an absolute path, but _create_standin() needs
301 # it repo-relative so lfadd() can pass it to repo_add(). So leave
303 # it repo-relative so lfadd() can pass it to repo_add(). So leave
302 # it up to the caller to use repo.wjoin() to get an absolute path.
304 # it up to the caller to use repo.wjoin() to get an absolute path.
303 # 2) Join with '/' because that's what dirstate always uses, even on
305 # 2) Join with '/' because that's what dirstate always uses, even on
304 # Windows. Change existing separator to '/' first in case we are
306 # Windows. Change existing separator to '/' first in case we are
305 # passed filenames from an external source (like the command line).
307 # passed filenames from an external source (like the command line).
306 return shortname + '/' + filename.replace(os.sep, '/')
308 return shortname + '/' + filename.replace(os.sep, '/')
307
309
308 def isstandin(filename):
310 def isstandin(filename):
309 '''Return true if filename is a big file standin. filename must be
311 '''Return true if filename is a big file standin. filename must be
310 in Mercurial's internal form (slash-separated).'''
312 in Mercurial's internal form (slash-separated).'''
311 return filename.startswith(shortname + '/')
313 return filename.startswith(shortname + '/')
312
314
313 def splitstandin(filename):
315 def splitstandin(filename):
314 # Split on / because that's what dirstate always uses, even on Windows.
316 # Split on / because that's what dirstate always uses, even on Windows.
315 # Change local separator to / first just in case we are passed filenames
317 # Change local separator to / first just in case we are passed filenames
316 # from an external source (like the command line).
318 # from an external source (like the command line).
317 bits = filename.replace(os.sep, '/').split('/', 1)
319 bits = filename.replace(os.sep, '/').split('/', 1)
318 if len(bits) == 2 and bits[0] == shortname:
320 if len(bits) == 2 and bits[0] == shortname:
319 return bits[1]
321 return bits[1]
320 else:
322 else:
321 return None
323 return None
322
324
323 def updatestandin(repo, standin):
325 def updatestandin(repo, standin):
324 file = repo.wjoin(splitstandin(standin))
326 file = repo.wjoin(splitstandin(standin))
325 if os.path.exists(file):
327 if os.path.exists(file):
326 hash = hashfile(file)
328 hash = hashfile(file)
327 executable = getexecutable(file)
329 executable = getexecutable(file)
328 writestandin(repo, standin, hash, executable)
330 writestandin(repo, standin, hash, executable)
329
331
330 def readstandin(repo, filename, node=None):
332 def readstandin(repo, filename, node=None):
331 '''read hex hash from standin for filename at given node, or working
333 '''read hex hash from standin for filename at given node, or working
332 directory if no node is given'''
334 directory if no node is given'''
333 return repo[node][standin(filename)].data().strip()
335 return repo[node][standin(filename)].data().strip()
334
336
335 def writestandin(repo, standin, hash, executable):
337 def writestandin(repo, standin, hash, executable):
336 '''write hash to <repo.root>/<standin>'''
338 '''write hash to <repo.root>/<standin>'''
337 writehash(hash, repo.wjoin(standin), executable)
339 writehash(hash, repo.wjoin(standin), executable)
338
340
339 def copyandhash(instream, outfile):
341 def copyandhash(instream, outfile):
340 '''Read bytes from instream (iterable) and write them to outfile,
342 '''Read bytes from instream (iterable) and write them to outfile,
341 computing the SHA-1 hash of the data along the way. Close outfile
343 computing the SHA-1 hash of the data along the way. Close outfile
342 when done and return the binary hash.'''
344 when done and return the binary hash.'''
343 hasher = util.sha1('')
345 hasher = util.sha1('')
344 for data in instream:
346 for data in instream:
345 hasher.update(data)
347 hasher.update(data)
346 outfile.write(data)
348 outfile.write(data)
347
349
348 # Blecch: closing a file that somebody else opened is rude and
350 # Blecch: closing a file that somebody else opened is rude and
349 # wrong. But it's so darn convenient and practical! After all,
351 # wrong. But it's so darn convenient and practical! After all,
350 # outfile was opened just to copy and hash.
352 # outfile was opened just to copy and hash.
351 outfile.close()
353 outfile.close()
352
354
353 return hasher.digest()
355 return hasher.digest()
354
356
def hashrepofile(repo, file):
    return hashfile(repo.wjoin(file))

def hashfile(file):
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()

class limitreader(object):
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        pass

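# Illustration (not part of this changeset): limitreader caps how many bytes
# callers can pull from an underlying file object, e.g. to read a fixed-size
# payload out of a larger stream (file name is hypothetical):
#
#   f = open('bundle.bin', 'rb')
#   reader = limitreader(f, 1024)     # at most 1024 bytes, then ''
#   data = reader.read(4096)          # returns only the first 1024 bytes
#   assert reader.read(4096) == ''    # limit exhausted
#   f.close()                         # limitreader.close() is a no-op
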
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        data = infile.read(blocksize)
        if not data:
            break
        yield data
    # same blecch as copyandhash() above
    infile.close()

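# Illustration (not part of this changeset): blockstream() lets the copy and
# hash helpers above work without loading whole files into memory
# (process() is a hypothetical consumer):
#
#   for block in blockstream(open('big.iso', 'rb')):
#       process(block)                # each block is at most 128 KiB
#   # infile is closed once the generator is exhausted
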
def readhash(filename):
    rfile = open(filename, 'rb')
    hash = rfile.read(40)
    rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash

def writehash(hash, filename, executable):
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))

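# Illustration (not part of this changeset): a standin file holds just the
# 40-character hex SHA-1 of the largefile plus a trailing newline, so the two
# helpers round-trip. The path and value below are hypothetical
# ('deadbeef' * 5 is simply a 40-character stand-in):
#
#   writehash('deadbeef' * 5, '.hglf/big.bin', executable=False)
#   assert readhash('.hglf/big.bin') == 'deadbeef' * 5
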
def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))

def getmode(executable):
    if executable:
        return 0755
    else:
        return 0644

def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

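# Illustration (not part of this changeset): unlike naive concatenation,
# urljoin() normalizes the slash between every pair of components:
#
#   >>> urljoin('http://example.com/', '/store', 'ab/cdef')
#   'http://example.com/store/ab/cdef'
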
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()

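# Illustration (not part of this changeset): hexsha1() expects a file-like
# object, not a string; util.filechunkiter reads it in bounded chunks
# (file name is hypothetical):
#
#   fd = open('big.bin', 'rb')
#   print hexsha1(fd)   # 'da39a3ee5e6b4b0d3255bfef95601890afd80709' if empty
#   fd.close()
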
def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, 'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return os.path.normpath(path).replace(os.sep, '/')

def islfilesrepo(repo):
    return ('largefiles' in repo.requirements and
            util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))

def mkstemp(repo, prefix):
    '''Returns a file descriptor and a filename corresponding to a temporary
    file in the repo's largefiles store.'''
    path = repo.join(longname)
    util.makedirs(path)
    return tempfile.mkstemp(prefix=prefix, dir=path)

class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes
@@ -1,446 +1,450 b''
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''setup for largefiles repositories: reposetup'''
import copy
import types
import os

from mercurial import context, error, manifest, match as match_, util
from mercurial import node as node_
from mercurial.i18n import _

import lfcommands
import proto
import lfutil

def reposetup(ui, repo):
    # wire repositories should be given new wireproto functions but not the
    # other largefiles modifications
    if not repo.local():
        return proto.wirereposetup(ui, repo)

    for name in ('status', 'commitctx', 'commit', 'push'):
        method = getattr(repo, name)
        if (isinstance(method, types.FunctionType) and
            method.func_name == 'wrap'):
            ui.warn(_('largefiles: repo method %r appears to have already been'
                      ' wrapped by another extension: '
                      'largefiles may behave incorrectly\n')
                    % name)

    class lfiles_repo(repo.__class__):
        lfstatus = False
        def status_nolfiles(self, *args, **kwargs):
            return super(lfiles_repo, self).status(*args, **kwargs)

        # When lfstatus is set, return a context that gives the names
        # of largefiles instead of their corresponding standins and
        # identifies the largefiles as always binary, regardless of
        # their actual contents.
        def __getitem__(self, changeid):
            ctx = super(lfiles_repo, self).__getitem__(changeid)
            if self.lfstatus:
                class lfiles_manifestdict(manifest.manifestdict):
                    def __contains__(self, filename):
                        if super(lfiles_manifestdict,
                                 self).__contains__(filename):
                            return True
                        return super(lfiles_manifestdict,
                                     self).__contains__(lfutil.standin(filename))
                class lfiles_ctx(ctx.__class__):
                    def files(self):
                        filenames = super(lfiles_ctx, self).files()
                        return [lfutil.splitstandin(f) or f for f in filenames]
                    def manifest(self):
                        man1 = super(lfiles_ctx, self).manifest()
                        man1.__class__ = lfiles_manifestdict
                        return man1
                    def filectx(self, path, fileid=None, filelog=None):
                        try:
                            result = super(lfiles_ctx, self).filectx(path,
                                fileid, filelog)
                        except error.LookupError:
                            # Adding a null character will cause Mercurial to
                            # identify this as a binary file.
                            result = super(lfiles_ctx, self).filectx(
                                lfutil.standin(path), fileid, filelog)
                            olddata = result.data
                            result.data = lambda: olddata() + '\0'
                        return result
                ctx.__class__ = lfiles_ctx
            return ctx

        # Figure out the status of big files and insert them into the
        # appropriate list in the result. Also remove standin files
        # from the listing. Revert to the original status if
        # self.lfstatus is False.
        def status(self, node1='.', node2=None, match=None, ignored=False,
                   clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                return super(lfiles_repo, self).status(node1, node2, match,
                    listignored, listclean, listunknown, listsubrepos)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = repo[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = repo[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                # First check if there were files specified on the
                # command line. If there were, and none of them were
                # largefiles, we should just bail here and let super
                # handle it -- thus gaining a big performance boost.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                if match.files() and not match.anypats():
                    matchedfiles = [f for f in match.files() if f in lfdirstate]
                    if not matchedfiles:
                        return super(lfiles_repo, self).status(node1, node2,
                            match, listignored, listclean,
                            listunknown, listsubrepos)

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandin(file):
                    if inctx(lfutil.standin(file), ctx2):
                        return lfutil.standin(file)
                    return file

                # Create a function that we can use to override what is
                # normally the ignore matcher. We've already checked
                # for ignored files on the first dirstate walk, and
                # unnecessarily re-checking here causes a huge performance
                # hit because lfdirstate only knows about largefiles
                def _ignoreoverride(self):
                    return False

                m = copy.copy(match)
                m._files = [tostandin(f) for f in m._files]

                # Get ignored files here even if we weren't asked for them; we
                # must use the result here for filtering later
                result = super(lfiles_repo, self).status(node1, node2, m,
                    True, clean, unknown, listsubrepos)
                if working:
                    # hold the wlock while we read largefiles and
                    # update the lfdirstate
                    wlock = repo.wlock()
                    try:
                        # Any non-largefiles that were explicitly listed must be
                        # taken out or lfdirstate.status will report an error.
                        # The status of these files was already computed using
                        # super's status.
                        # Override lfdirstate's ignore matcher to not do
                        # anything
                        orig_ignore = lfdirstate._ignore
                        lfdirstate._ignore = _ignoreoverride

                        match._files = [f for f in match._files if f in
                            lfdirstate]
                        # Don't waste time getting the ignored and unknown
                        # files again; we already have them
                        s = lfdirstate.status(match, [], False,
                            listclean, False)
                        (unsure, modified, added, removed, missing, unknown,
                         ignored, clean) = s
                        # Replace the list of ignored and unknown files with
                        # the previously calculated lists, and strip out the
                        # largefiles
                        lfiles = set(lfdirstate._map)
                        ignored = set(result[5]).difference(lfiles)
                        unknown = set(result[4]).difference(lfiles)
                        if parentworking:
                            for lfile in unsure:
                                standin = lfutil.standin(lfile)
                                if standin not in ctx1:
                                    # from second parent
                                    modified.append(lfile)
                                elif ctx1[standin].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                            lfdirstate.write()
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)
                        # Replace the original ignore function
                        lfdirstate._ignore = orig_ignore
                    finally:
                        wlock.release()

                for standin in ctx1.manifest():
                    if not lfutil.isstandin(standin):
                        continue
                    lfile = lfutil.splitstandin(standin)
                    if not match(lfile):
                        continue
                    if lfile not in lfdirstate:
                        removed.append(lfile)

                # Filter result lists
                result = list(result)

                # Largefiles are not really removed when they're
                # still in the normal dirstate. Likewise, normal
                # files are not really removed if they're still in
                # lfdirstate. This happens in merges where files
                # change type.
                removed = [f for f in removed if f not in repo.dirstate]
                result[2] = [f for f in result[2] if f not in lfdirstate]

                # Unknown files
                unknown = set(unknown).difference(ignored)
                result[4] = [f for f in unknown
                             if (repo.dirstate[f] == '?' and
                                 not lfutil.isstandin(f))]
                # Ignored files were calculated earlier by the dirstate,
                # and we already stripped out the largefiles from the list
                result[5] = ignored
                # combine normal files and largefiles
                normals = [[fn for fn in filelist
                            if not lfutil.isstandin(fn)]
                           for filelist in result]
                lfiles = (modified, added, removed, missing, [], [], clean)
                result = [sorted(list1 + list2)
                          for (list1, list2) in zip(normals, lfiles)]
            else:
                def toname(f):
                    if lfutil.isstandin(f):
                        return lfutil.splitstandin(f)
                    return f
                result = [[toname(f) for f in items] for items in result]

            if not listunknown:
                result[4] = []
            if not listignored:
                result[5] = []
            if not listclean:
                result[6] = []
            self.lfstatus = True
            return result

        # As part of committing, copy all of the largefiles into the
        # cache.
        def commitctx(self, *args, **kwargs):
            node = super(lfiles_repo, self).commitctx(*args, **kwargs)
            ctx = self[node]
            for filename in ctx.files():
                if lfutil.isstandin(filename) and filename in ctx.manifest():
                    realfile = lfutil.splitstandin(filename)
                    lfutil.copytostore(self, ctx.node(), realfile)

            return node

        # Before commit, largefile standins have not had their
        # contents updated to reflect the hash of their largefile.
        # Do that here.
        def commit(self, text="", user=None, date=None, match=None,
                force=False, editor=False, extra={}):
            orig = super(lfiles_repo, self).commit

            wlock = repo.wlock()
            try:
                # Case 0: Rebase
                # We have to take the time to pull down the new largefiles now.
                # Otherwise if we are rebasing, any largefiles that were
                # modified in the destination changesets get overwritten, either
                # by the rebase or in the first commit after the rebase.
                # updatelfiles will update the dirstate to mark any pulled
                # largefiles as modified
                if getattr(repo, "_isrebasing", False):
                    lfcommands.updatelfiles(repo.ui, repo)
                    result = orig(text=text, user=user, date=date, match=match,
                            force=force, editor=editor, extra=extra)
                    return result
                # Case 1: user calls commit with no specific files or
                # include/exclude patterns: refresh and commit all files that
                # are "dirty".
                if ((match is None) or
                        (not match.anypats() and not match.files())):
                    # Spend a bit of time here to get a list of files we know
                    # are modified so we can compare only against those.
                    # Otherwise it can cost a lot of time (several seconds)
                    # to update all standins if the largefiles are large.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    dirtymatch = match_.always(repo.root, repo.getcwd())
                    s = lfdirstate.status(dirtymatch, [], False, False, False)
                    modifiedfiles = []
                    for i in s:
                        modifiedfiles.extend(i)
                    lfiles = lfutil.listlfiles(self)
                    # this only loops through largefiles that exist (not
                    # removed/renamed)
                    for lfile in lfiles:
                        if lfile in modifiedfiles:
                            if os.path.exists(self.wjoin(lfutil.standin(lfile))):
                                # this handles the case where a rebase is being
                                # performed and the working copy is not updated
                                # yet.
                                if os.path.exists(self.wjoin(lfile)):
                                    lfutil.updatestandin(self,
                                        lfutil.standin(lfile))
                                    lfdirstate.normal(lfile)
                    for lfile in lfdirstate:
                        if lfile in modifiedfiles:
                            if not os.path.exists(
                                    repo.wjoin(lfutil.standin(lfile))):
                                lfdirstate.drop(lfile)
                    lfdirstate.write()

                    return orig(text=text, user=user, date=date, match=match,
                        force=force, editor=editor, extra=extra)

                for f in match.files():
                    if lfutil.isstandin(f):
                        raise util.Abort(
                            _('file "%s" is a largefile standin') % f,
                            hint=('commit the largefile itself instead'))

                # Case 2: user calls commit with specified patterns: refresh
                # any matching big files.
                smatcher = lfutil.composestandinmatcher(self, match)
                standins = lfutil.dirstate_walk(self.dirstate, smatcher)

                # No matching big files: get out of the way and pass control to
                # the usual commit() method.
                if not standins:
                    return orig(text=text, user=user, date=date, match=match,
                        force=force, editor=editor, extra=extra)

                # Refresh all matching big files. It's possible that the
                # commit will end up failing, in which case the big files will
                # stay refreshed. No harm done: the user modified them and
                # asked to commit them, so sooner or later we're going to
                # refresh the standins. Might as well leave them refreshed.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                for standin in standins:
                    lfile = lfutil.splitstandin(standin)
                    if lfdirstate[lfile] != 'r':
                        lfutil.updatestandin(self, standin)
                        lfdirstate.normal(lfile)
                    else:
                        lfdirstate.drop(lfile)
                lfdirstate.write()

                # Cook up a new matcher that only matches regular files or
                # standins corresponding to the big files requested by the
                # user. Have to modify _files to prevent commit() from
                # complaining "not tracked" for big files.
                lfiles = lfutil.listlfiles(repo)
                match = copy.copy(match)
                orig_matchfn = match.matchfn

                # Check both the list of largefiles and the list of
                # standins because if a largefile was removed, it
                # won't be in the list of largefiles at this point
                match._files += sorted(standins)

                actualfiles = []
                for f in match._files:
                    fstandin = lfutil.standin(f)

                    # ignore known largefiles and standins
                    if f in lfiles or fstandin in standins:
                        continue

                    # append directory separator to avoid collisions
                    if not fstandin.endswith(os.sep):
                        fstandin += os.sep

                    # prevalidate matching standin directories
                    if util.any(st for st in match._files
                                if st.startswith(fstandin)):
                        continue
                    actualfiles.append(f)
                match._files = actualfiles

                def matchfn(f):
                    if orig_matchfn(f):
                        return f not in lfiles
                    else:
                        return f in standins

                match.matchfn = matchfn
                return orig(text=text, user=user, date=date, match=match,
                    force=force, editor=editor, extra=extra)
            finally:
                wlock.release()

        def push(self, remote, force=False, revs=None, newbranch=False):
            o = lfutil.findoutgoing(repo, remote, force)
            if o:
                toupload = set()
                o = repo.changelog.nodesbetween(o, revs)[0]
                for n in o:
                    parents = [p for p in repo.changelog.parents(n)
                               if p != node_.nullid]
                    ctx = repo[n]
                    files = set(ctx.files())
                    if len(parents) == 2:
                        mc = ctx.manifest()
                        mp1 = ctx.parents()[0].manifest()
                        mp2 = ctx.parents()[1].manifest()
                        for f in mp1:
                            if f not in mc:
                                files.add(f)
                        for f in mp2:
                            if f not in mc:
                                files.add(f)
                        for f in mc:
                            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
                                    None):
                                files.add(f)

                    toupload = toupload.union(
                        set([ctx[f].data().strip()
                             for f in files
                             if lfutil.isstandin(f) and f in ctx]))
                lfcommands.uploadlfiles(ui, self, remote, toupload)
            return super(lfiles_repo, self).push(remote, force, revs,
                newbranch)

    repo.__class__ = lfiles_repo

    def checkrequireslfiles(ui, repo, **kwargs):
        if 'largefiles' not in repo.requirements and util.any(
                lfutil.shortname + '/' in f[0] for f in repo.store.datafiles()):
            repo.requirements.add('largefiles')
            repo._writerequirements()

    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
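
# Illustration (not part of this changeset): the rebase extension sets a
# private _isrebasing flag on the repo while it runs, which is what the
# "Case 0" guard in commit() above checks for. A minimal sketch of the
# pattern, with hypothetical names:
#
#   def rebasehook(repo):
#       repo._isrebasing = True
#       try:
#           pass  # ... perform the rebase; commit() sees the flag ...
#       finally:
#           repo._isrebasing = False
#
# Using getattr(repo, "_isrebasing", False) keeps commit() safe even when
# the rebase extension is not loaded and the attribute never exists.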