largefiles: remove pre-1.9 code from extension first bundled with 1.9
Na'Tosha Bard
r15224:7c604d8c default
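This changeset raises the extension's baseline to Mercurial 1.9 by deleting the run-time compatibility shims for older releases. A minimal, hypothetical sketch of the probing pattern the first hunk removes, based on the deleted lines around lfdirstate.drop(); the helper names here are illustrative and not part of the extension:

def drop_standin_state(lfdirstate, lfile):
    # old, removed style: probe for the newer dirstate API at run time
    try:
        lfdirstate.drop(lfile)       # Mercurial >= 1.9
    except AttributeError:
        lfdirstate.forget(lfile)     # Mercurial <= 1.8 fallback, now gone

def drop_standin_state_after(lfdirstate, lfile):
    # style after this changeset: assume the Mercurial 1.9 dirstate API
    lfdirstate.drop(lfile)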
@@ -1,488 +1,483 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command functions: lfadd() et al., plus the cmdtable.'''
9 '''High-level command functions: lfadd() et al., plus the cmdtable.'''
10
10
11 import os
11 import os
12 import shutil
12 import shutil
13
13
14 from mercurial import util, match as match_, hg, node, context, error
14 from mercurial import util, match as match_, hg, node, context, error
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 import lfutil
17 import lfutil
18 import basestore
18 import basestore
19
19
20 # -- Commands ----------------------------------------------------------
20 # -- Commands ----------------------------------------------------------
21
21
22 def lfconvert(ui, src, dest, *pats, **opts):
22 def lfconvert(ui, src, dest, *pats, **opts):
23 '''Convert a normal repository to a largefiles repository
23 '''Convert a normal repository to a largefiles repository
24
24
25 Convert source repository creating an identical repository, except that all
25 Convert source repository creating an identical repository, except that all
26 files that match the patterns given, or are over the given size will be
26 files that match the patterns given, or are over the given size will be
27 added as largefiles. The size used to determine whether or not to track a
27 added as largefiles. The size used to determine whether or not to track a
28 file as a largefile is the size of the first version of the file. After
28 file as a largefile is the size of the first version of the file. After
29 running this command you will need to make sure that largefiles is enabled
29 running this command you will need to make sure that largefiles is enabled
30 anywhere you intend to push the new repository.'''
30 anywhere you intend to push the new repository.'''
31
31
32 if opts['tonormal']:
32 if opts['tonormal']:
33 tolfile = False
33 tolfile = False
34 else:
34 else:
35 tolfile = True
35 tolfile = True
36 size = opts['size']
36 size = opts['size']
37 if not size:
37 if not size:
38 size = ui.config(lfutil.longname, 'size', default=None)
38 size = ui.config(lfutil.longname, 'size', default=None)
39 try:
39 try:
40 size = int(size)
40 size = int(size)
41 except ValueError:
41 except ValueError:
42 raise util.Abort(_('largefiles.size must be integer, was %s\n')
42 raise util.Abort(_('largefiles.size must be integer, was %s\n')
43 % size)
43 % size)
44 except TypeError:
44 except TypeError:
45 raise util.Abort(_('size must be specified'))
45 raise util.Abort(_('size must be specified'))
46
46
47 try:
47 try:
48 rsrc = hg.repository(ui, src)
48 rsrc = hg.repository(ui, src)
49 if not rsrc.local():
49 if not rsrc.local():
50 raise util.Abort(_('%s is not a local Mercurial repo') % src)
50 raise util.Abort(_('%s is not a local Mercurial repo') % src)
51 except error.RepoError, err:
51 except error.RepoError, err:
52 ui.traceback()
52 ui.traceback()
53 raise util.Abort(err.args[0])
53 raise util.Abort(err.args[0])
54 if os.path.exists(dest):
54 if os.path.exists(dest):
55 if not os.path.isdir(dest):
55 if not os.path.isdir(dest):
56 raise util.Abort(_('destination %s already exists') % dest)
56 raise util.Abort(_('destination %s already exists') % dest)
57 elif os.listdir(dest):
57 elif os.listdir(dest):
58 raise util.Abort(_('destination %s is not empty') % dest)
58 raise util.Abort(_('destination %s is not empty') % dest)
59 try:
59 try:
60 ui.status(_('initializing destination %s\n') % dest)
60 ui.status(_('initializing destination %s\n') % dest)
61 rdst = hg.repository(ui, dest, create=True)
61 rdst = hg.repository(ui, dest, create=True)
62 if not rdst.local():
62 if not rdst.local():
63 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
63 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
64 except error.RepoError:
64 except error.RepoError:
65 ui.traceback()
65 ui.traceback()
66 raise util.Abort(_('%s is not a repo') % dest)
66 raise util.Abort(_('%s is not a repo') % dest)
67
67
68 success = False
68 success = False
69 try:
69 try:
70 # Lock destination to prevent modification while it is converted to.
70 # Lock destination to prevent modification while it is converted to.
71 # Don't need to lock src because we are just reading from its history
71 # Don't need to lock src because we are just reading from its history
72 # which can't change.
72 # which can't change.
73 dst_lock = rdst.lock()
73 dst_lock = rdst.lock()
74
74
75 # Get a list of all changesets in the source. The easy way to do this
75 # Get a list of all changesets in the source. The easy way to do this
76 # is to simply walk the changelog, using changelog.nodesbetween().
76 # is to simply walk the changelog, using changelog.nodesbetween().
77 # Take a look at mercurial/revlog.py:639 for more details.
77 # Take a look at mercurial/revlog.py:639 for more details.
78 # Use a generator instead of a list to decrease memory usage
78 # Use a generator instead of a list to decrease memory usage
79 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
79 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
80 rsrc.heads())[0])
80 rsrc.heads())[0])
81 revmap = {node.nullid: node.nullid}
81 revmap = {node.nullid: node.nullid}
82 if tolfile:
82 if tolfile:
83 lfiles = set()
83 lfiles = set()
84 normalfiles = set()
84 normalfiles = set()
85 if not pats:
85 if not pats:
86 pats = ui.config(lfutil.longname, 'patterns', default=())
86 pats = ui.config(lfutil.longname, 'patterns', default=())
87 if pats:
87 if pats:
88 pats = pats.split(' ')
88 pats = pats.split(' ')
89 if pats:
89 if pats:
90 matcher = match_.match(rsrc.root, '', list(pats))
90 matcher = match_.match(rsrc.root, '', list(pats))
91 else:
91 else:
92 matcher = None
92 matcher = None
93
93
94 lfiletohash = {}
94 lfiletohash = {}
95 for ctx in ctxs:
95 for ctx in ctxs:
96 ui.progress(_('converting revisions'), ctx.rev(),
96 ui.progress(_('converting revisions'), ctx.rev(),
97 unit=_('revision'), total=rsrc['tip'].rev())
97 unit=_('revision'), total=rsrc['tip'].rev())
98 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
98 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
99 lfiles, normalfiles, matcher, size, lfiletohash)
99 lfiles, normalfiles, matcher, size, lfiletohash)
100 ui.progress(_('converting revisions'), None)
100 ui.progress(_('converting revisions'), None)
101
101
102 if os.path.exists(rdst.wjoin(lfutil.shortname)):
102 if os.path.exists(rdst.wjoin(lfutil.shortname)):
103 shutil.rmtree(rdst.wjoin(lfutil.shortname))
103 shutil.rmtree(rdst.wjoin(lfutil.shortname))
104
104
105 for f in lfiletohash.keys():
105 for f in lfiletohash.keys():
106 if os.path.isfile(rdst.wjoin(f)):
106 if os.path.isfile(rdst.wjoin(f)):
107 os.unlink(rdst.wjoin(f))
107 os.unlink(rdst.wjoin(f))
108 try:
108 try:
109 os.removedirs(os.path.dirname(rdst.wjoin(f)))
109 os.removedirs(os.path.dirname(rdst.wjoin(f)))
110 except OSError:
110 except OSError:
111 pass
111 pass
112
112
113 else:
113 else:
114 for ctx in ctxs:
114 for ctx in ctxs:
115 ui.progress(_('converting revisions'), ctx.rev(),
115 ui.progress(_('converting revisions'), ctx.rev(),
116 unit=_('revision'), total=rsrc['tip'].rev())
116 unit=_('revision'), total=rsrc['tip'].rev())
117 _addchangeset(ui, rsrc, rdst, ctx, revmap)
117 _addchangeset(ui, rsrc, rdst, ctx, revmap)
118
118
119 ui.progress(_('converting revisions'), None)
119 ui.progress(_('converting revisions'), None)
120 success = True
120 success = True
121 finally:
121 finally:
122 if not success:
122 if not success:
123 # we failed, remove the new directory
123 # we failed, remove the new directory
124 shutil.rmtree(rdst.root)
124 shutil.rmtree(rdst.root)
125 dst_lock.release()
125 dst_lock.release()
126
126
127 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
127 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
128 # Convert src parents to dst parents
128 # Convert src parents to dst parents
129 parents = []
129 parents = []
130 for p in ctx.parents():
130 for p in ctx.parents():
131 parents.append(revmap[p.node()])
131 parents.append(revmap[p.node()])
132 while len(parents) < 2:
132 while len(parents) < 2:
133 parents.append(node.nullid)
133 parents.append(node.nullid)
134
134
135 # Generate list of changed files
135 # Generate list of changed files
136 files = set(ctx.files())
136 files = set(ctx.files())
137 if node.nullid not in parents:
137 if node.nullid not in parents:
138 mc = ctx.manifest()
138 mc = ctx.manifest()
139 mp1 = ctx.parents()[0].manifest()
139 mp1 = ctx.parents()[0].manifest()
140 mp2 = ctx.parents()[1].manifest()
140 mp2 = ctx.parents()[1].manifest()
141 files |= (set(mp1) | set(mp2)) - set(mc)
141 files |= (set(mp1) | set(mp2)) - set(mc)
142 for f in mc:
142 for f in mc:
143 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
143 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
144 files.add(f)
144 files.add(f)
145
145
146 def getfilectx(repo, memctx, f):
146 def getfilectx(repo, memctx, f):
147 if lfutil.standin(f) in files:
147 if lfutil.standin(f) in files:
148 # if the file isn't in the manifest then it was removed
148 # if the file isn't in the manifest then it was removed
149 # or renamed, raise IOError to indicate this
149 # or renamed, raise IOError to indicate this
150 try:
150 try:
151 fctx = ctx.filectx(lfutil.standin(f))
151 fctx = ctx.filectx(lfutil.standin(f))
152 except error.LookupError:
152 except error.LookupError:
153 raise IOError()
153 raise IOError()
154 renamed = fctx.renamed()
154 renamed = fctx.renamed()
155 if renamed:
155 if renamed:
156 renamed = lfutil.splitstandin(renamed[0])
156 renamed = lfutil.splitstandin(renamed[0])
157
157
158 hash = fctx.data().strip()
158 hash = fctx.data().strip()
159 path = lfutil.findfile(rsrc, hash)
159 path = lfutil.findfile(rsrc, hash)
160 ### TODO: What if the file is not cached?
160 ### TODO: What if the file is not cached?
161 data = ''
161 data = ''
162 fd = None
162 fd = None
163 try:
163 try:
164 fd = open(path, 'rb')
164 fd = open(path, 'rb')
165 data = fd.read()
165 data = fd.read()
166 finally:
166 finally:
167 if fd:
167 if fd:
168 fd.close()
168 fd.close()
169 return context.memfilectx(f, data, 'l' in fctx.flags(),
169 return context.memfilectx(f, data, 'l' in fctx.flags(),
170 'x' in fctx.flags(), renamed)
170 'x' in fctx.flags(), renamed)
171 else:
171 else:
172 try:
172 try:
173 fctx = ctx.filectx(f)
173 fctx = ctx.filectx(f)
174 except error.LookupError:
174 except error.LookupError:
175 raise IOError()
175 raise IOError()
176 renamed = fctx.renamed()
176 renamed = fctx.renamed()
177 if renamed:
177 if renamed:
178 renamed = renamed[0]
178 renamed = renamed[0]
179 data = fctx.data()
179 data = fctx.data()
180 if f == '.hgtags':
180 if f == '.hgtags':
181 newdata = []
181 newdata = []
182 for line in data.splitlines():
182 for line in data.splitlines():
183 id, name = line.split(' ', 1)
183 id, name = line.split(' ', 1)
184 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
184 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
185 name))
185 name))
186 data = ''.join(newdata)
186 data = ''.join(newdata)
187 return context.memfilectx(f, data, 'l' in fctx.flags(),
187 return context.memfilectx(f, data, 'l' in fctx.flags(),
188 'x' in fctx.flags(), renamed)
188 'x' in fctx.flags(), renamed)
189
189
190 dstfiles = []
190 dstfiles = []
191 for file in files:
191 for file in files:
192 if lfutil.isstandin(file):
192 if lfutil.isstandin(file):
193 dstfiles.append(lfutil.splitstandin(file))
193 dstfiles.append(lfutil.splitstandin(file))
194 else:
194 else:
195 dstfiles.append(file)
195 dstfiles.append(file)
196 # Commit
196 # Commit
197 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
197 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
198 getfilectx, ctx.user(), ctx.date(), ctx.extra())
198 getfilectx, ctx.user(), ctx.date(), ctx.extra())
199 ret = rdst.commitctx(mctx)
199 ret = rdst.commitctx(mctx)
200 rdst.dirstate.setparents(ret)
200 rdst.dirstate.setparents(ret)
201 revmap[ctx.node()] = rdst.changelog.tip()
201 revmap[ctx.node()] = rdst.changelog.tip()
202
202
203 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
203 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
204 matcher, size, lfiletohash):
204 matcher, size, lfiletohash):
205 # Convert src parents to dst parents
205 # Convert src parents to dst parents
206 parents = []
206 parents = []
207 for p in ctx.parents():
207 for p in ctx.parents():
208 parents.append(revmap[p.node()])
208 parents.append(revmap[p.node()])
209 while len(parents) < 2:
209 while len(parents) < 2:
210 parents.append(node.nullid)
210 parents.append(node.nullid)
211
211
212 # Generate list of changed files
212 # Generate list of changed files
213 files = set(ctx.files())
213 files = set(ctx.files())
214 if node.nullid not in parents:
214 if node.nullid not in parents:
215 mc = ctx.manifest()
215 mc = ctx.manifest()
216 mp1 = ctx.parents()[0].manifest()
216 mp1 = ctx.parents()[0].manifest()
217 mp2 = ctx.parents()[1].manifest()
217 mp2 = ctx.parents()[1].manifest()
218 files |= (set(mp1) | set(mp2)) - set(mc)
218 files |= (set(mp1) | set(mp2)) - set(mc)
219 for f in mc:
219 for f in mc:
220 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
220 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
221 files.add(f)
221 files.add(f)
222
222
223 dstfiles = []
223 dstfiles = []
224 for f in files:
224 for f in files:
225 if f not in lfiles and f not in normalfiles:
225 if f not in lfiles and f not in normalfiles:
226 islfile = _islfile(f, ctx, matcher, size)
226 islfile = _islfile(f, ctx, matcher, size)
227 # If this file was renamed or copied then copy
227 # If this file was renamed or copied then copy
228 # the lfileness of its predecessor
228 # the lfileness of its predecessor
229 if f in ctx.manifest():
229 if f in ctx.manifest():
230 fctx = ctx.filectx(f)
230 fctx = ctx.filectx(f)
231 renamed = fctx.renamed()
231 renamed = fctx.renamed()
232 renamedlfile = renamed and renamed[0] in lfiles
232 renamedlfile = renamed and renamed[0] in lfiles
233 islfile |= renamedlfile
233 islfile |= renamedlfile
234 if 'l' in fctx.flags():
234 if 'l' in fctx.flags():
235 if renamedlfile:
235 if renamedlfile:
236 raise util.Abort(
236 raise util.Abort(
237 _('Renamed/copied largefile %s becomes symlink')
237 _('Renamed/copied largefile %s becomes symlink')
238 % f)
238 % f)
239 islfile = False
239 islfile = False
240 if islfile:
240 if islfile:
241 lfiles.add(f)
241 lfiles.add(f)
242 else:
242 else:
243 normalfiles.add(f)
243 normalfiles.add(f)
244
244
245 if f in lfiles:
245 if f in lfiles:
246 dstfiles.append(lfutil.standin(f))
246 dstfiles.append(lfutil.standin(f))
247 # lfile in manifest if it has not been removed/renamed
247 # lfile in manifest if it has not been removed/renamed
248 if f in ctx.manifest():
248 if f in ctx.manifest():
249 if 'l' in ctx.filectx(f).flags():
249 if 'l' in ctx.filectx(f).flags():
250 if renamed and renamed[0] in lfiles:
250 if renamed and renamed[0] in lfiles:
251 raise util.Abort(_('largefile %s becomes symlink') % f)
251 raise util.Abort(_('largefile %s becomes symlink') % f)
252
252
253 # lfile was modified, update standins
253 # lfile was modified, update standins
254 fullpath = rdst.wjoin(f)
254 fullpath = rdst.wjoin(f)
255 lfutil.createdir(os.path.dirname(fullpath))
255 lfutil.createdir(os.path.dirname(fullpath))
256 m = util.sha1('')
256 m = util.sha1('')
257 m.update(ctx[f].data())
257 m.update(ctx[f].data())
258 hash = m.hexdigest()
258 hash = m.hexdigest()
259 if f not in lfiletohash or lfiletohash[f] != hash:
259 if f not in lfiletohash or lfiletohash[f] != hash:
260 try:
260 try:
261 fd = open(fullpath, 'wb')
261 fd = open(fullpath, 'wb')
262 fd.write(ctx[f].data())
262 fd.write(ctx[f].data())
263 finally:
263 finally:
264 if fd:
264 if fd:
265 fd.close()
265 fd.close()
266 executable = 'x' in ctx[f].flags()
266 executable = 'x' in ctx[f].flags()
267 os.chmod(fullpath, lfutil.getmode(executable))
267 os.chmod(fullpath, lfutil.getmode(executable))
268 lfutil.writestandin(rdst, lfutil.standin(f), hash,
268 lfutil.writestandin(rdst, lfutil.standin(f), hash,
269 executable)
269 executable)
270 lfiletohash[f] = hash
270 lfiletohash[f] = hash
271 else:
271 else:
272 # normal file
272 # normal file
273 dstfiles.append(f)
273 dstfiles.append(f)
274
274
275 def getfilectx(repo, memctx, f):
275 def getfilectx(repo, memctx, f):
276 if lfutil.isstandin(f):
276 if lfutil.isstandin(f):
277 # if the file isn't in the manifest then it was removed
277 # if the file isn't in the manifest then it was removed
278 # or renamed, raise IOError to indicate this
278 # or renamed, raise IOError to indicate this
279 srcfname = lfutil.splitstandin(f)
279 srcfname = lfutil.splitstandin(f)
280 try:
280 try:
281 fctx = ctx.filectx(srcfname)
281 fctx = ctx.filectx(srcfname)
282 except error.LookupError:
282 except error.LookupError:
283 raise IOError()
283 raise IOError()
284 renamed = fctx.renamed()
284 renamed = fctx.renamed()
285 if renamed:
285 if renamed:
286 # standin is always a lfile because lfileness
286 # standin is always a lfile because lfileness
287 # doesn't change after rename or copy
287 # doesn't change after rename or copy
288 renamed = lfutil.standin(renamed[0])
288 renamed = lfutil.standin(renamed[0])
289
289
290 return context.memfilectx(f, lfiletohash[srcfname], 'l' in
290 return context.memfilectx(f, lfiletohash[srcfname], 'l' in
291 fctx.flags(), 'x' in fctx.flags(), renamed)
291 fctx.flags(), 'x' in fctx.flags(), renamed)
292 else:
292 else:
293 try:
293 try:
294 fctx = ctx.filectx(f)
294 fctx = ctx.filectx(f)
295 except error.LookupError:
295 except error.LookupError:
296 raise IOError()
296 raise IOError()
297 renamed = fctx.renamed()
297 renamed = fctx.renamed()
298 if renamed:
298 if renamed:
299 renamed = renamed[0]
299 renamed = renamed[0]
300
300
301 data = fctx.data()
301 data = fctx.data()
302 if f == '.hgtags':
302 if f == '.hgtags':
303 newdata = []
303 newdata = []
304 for line in data.splitlines():
304 for line in data.splitlines():
305 id, name = line.split(' ', 1)
305 id, name = line.split(' ', 1)
306 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
306 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
307 name))
307 name))
308 data = ''.join(newdata)
308 data = ''.join(newdata)
309 return context.memfilectx(f, data, 'l' in fctx.flags(),
309 return context.memfilectx(f, data, 'l' in fctx.flags(),
310 'x' in fctx.flags(), renamed)
310 'x' in fctx.flags(), renamed)
311
311
312 # Commit
312 # Commit
313 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
313 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
314 getfilectx, ctx.user(), ctx.date(), ctx.extra())
314 getfilectx, ctx.user(), ctx.date(), ctx.extra())
315 ret = rdst.commitctx(mctx)
315 ret = rdst.commitctx(mctx)
316 rdst.dirstate.setparents(ret)
316 rdst.dirstate.setparents(ret)
317 revmap[ctx.node()] = rdst.changelog.tip()
317 revmap[ctx.node()] = rdst.changelog.tip()
318
318
319 def _islfile(file, ctx, matcher, size):
319 def _islfile(file, ctx, matcher, size):
320 '''
320 '''
321 A file is a lfile if it matches a pattern or is over
321 A file is a lfile if it matches a pattern or is over
322 the given size.
322 the given size.
323 '''
323 '''
324 # Never store hgtags or hgignore as lfiles
324 # Never store hgtags or hgignore as lfiles
325 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
325 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
326 return False
326 return False
327 if matcher and matcher(file):
327 if matcher and matcher(file):
328 return True
328 return True
329 try:
329 try:
330 return ctx.filectx(file).size() >= size * 1024 * 1024
330 return ctx.filectx(file).size() >= size * 1024 * 1024
331 except error.LookupError:
331 except error.LookupError:
332 return False
332 return False
333
333
334 def uploadlfiles(ui, rsrc, rdst, files):
334 def uploadlfiles(ui, rsrc, rdst, files):
335 '''upload largefiles to the central store'''
335 '''upload largefiles to the central store'''
336
336
337 # Don't upload locally. All largefiles are in the system wide cache
337 # Don't upload locally. All largefiles are in the system wide cache
338 # so the other repo can just get them from there.
338 # so the other repo can just get them from there.
339 if not files or rdst.local():
339 if not files or rdst.local():
340 return
340 return
341
341
342 store = basestore._openstore(rsrc, rdst, put=True)
342 store = basestore._openstore(rsrc, rdst, put=True)
343
343
344 at = 0
344 at = 0
345 files = filter(lambda h: not store.exists(h), files)
345 files = filter(lambda h: not store.exists(h), files)
346 for hash in files:
346 for hash in files:
347 ui.progress(_('uploading largefiles'), at, unit='largefile',
347 ui.progress(_('uploading largefiles'), at, unit='largefile',
348 total=len(files))
348 total=len(files))
349 source = lfutil.findfile(rsrc, hash)
349 source = lfutil.findfile(rsrc, hash)
350 if not source:
350 if not source:
351 raise util.Abort(_('Missing largefile %s needs to be uploaded')
351 raise util.Abort(_('Missing largefile %s needs to be uploaded')
352 % hash)
352 % hash)
353 # XXX check for errors here
353 # XXX check for errors here
354 store.put(source, hash)
354 store.put(source, hash)
355 at += 1
355 at += 1
356 ui.progress(_('uploading largefiles'), None)
356 ui.progress(_('uploading largefiles'), None)
357
357
358 def verifylfiles(ui, repo, all=False, contents=False):
358 def verifylfiles(ui, repo, all=False, contents=False):
359 '''Verify that every big file revision in the current changeset
359 '''Verify that every big file revision in the current changeset
360 exists in the central store. With --contents, also verify that
360 exists in the central store. With --contents, also verify that
361 the contents of each big file revision are correct (SHA-1 hash
361 the contents of each big file revision are correct (SHA-1 hash
362 matches the revision ID). With --all, check every changeset in
362 matches the revision ID). With --all, check every changeset in
363 this repository.'''
363 this repository.'''
364 if all:
364 if all:
365 # Pass a list to the function rather than an iterator because we know a
365 # Pass a list to the function rather than an iterator because we know a
366 # list will work.
366 # list will work.
367 revs = range(len(repo))
367 revs = range(len(repo))
368 else:
368 else:
369 revs = ['.']
369 revs = ['.']
370
370
371 store = basestore._openstore(repo)
371 store = basestore._openstore(repo)
372 return store.verify(revs, contents=contents)
372 return store.verify(revs, contents=contents)
373
373
374 def cachelfiles(ui, repo, node):
374 def cachelfiles(ui, repo, node):
375 '''cachelfiles ensures that all largefiles needed by the specified revision
375 '''cachelfiles ensures that all largefiles needed by the specified revision
376 are present in the repository's largefile cache.
376 are present in the repository's largefile cache.
377
377
378 returns a tuple (cached, missing). cached is the list of files downloaded
378 returns a tuple (cached, missing). cached is the list of files downloaded
379 by this operation; missing is the list of files that were needed but could
379 by this operation; missing is the list of files that were needed but could
380 not be found.'''
380 not be found.'''
381 lfiles = lfutil.listlfiles(repo, node)
381 lfiles = lfutil.listlfiles(repo, node)
382 toget = []
382 toget = []
383
383
384 for lfile in lfiles:
384 for lfile in lfiles:
385 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
385 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
386 # if it exists and its hash matches, it might have been locally
386 # if it exists and its hash matches, it might have been locally
387 # modified before updating and the user chose 'local'. in this case,
387 # modified before updating and the user chose 'local'. in this case,
388 # it will not be in any store, so don't look for it.
388 # it will not be in any store, so don't look for it.
389 if (not os.path.exists(repo.wjoin(lfile)) \
389 if (not os.path.exists(repo.wjoin(lfile)) \
390 or expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and \
390 or expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and \
391 not lfutil.findfile(repo, expectedhash):
391 not lfutil.findfile(repo, expectedhash):
392 toget.append((lfile, expectedhash))
392 toget.append((lfile, expectedhash))
393
393
394 if toget:
394 if toget:
395 store = basestore._openstore(repo)
395 store = basestore._openstore(repo)
396 ret = store.get(toget)
396 ret = store.get(toget)
397 return ret
397 return ret
398
398
399 return ([], [])
399 return ([], [])
400
400
401 def updatelfiles(ui, repo, filelist=None, printmessage=True):
401 def updatelfiles(ui, repo, filelist=None, printmessage=True):
402 wlock = repo.wlock()
402 wlock = repo.wlock()
403 try:
403 try:
404 lfdirstate = lfutil.openlfdirstate(ui, repo)
404 lfdirstate = lfutil.openlfdirstate(ui, repo)
405 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
405 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
406
406
407 if filelist is not None:
407 if filelist is not None:
408 lfiles = [f for f in lfiles if f in filelist]
408 lfiles = [f for f in lfiles if f in filelist]
409
409
410 printed = False
410 printed = False
411 if printmessage and lfiles:
411 if printmessage and lfiles:
412 ui.status(_('getting changed largefiles\n'))
412 ui.status(_('getting changed largefiles\n'))
413 printed = True
413 printed = True
414 cachelfiles(ui, repo, '.')
414 cachelfiles(ui, repo, '.')
415
415
416 updated, removed = 0, 0
416 updated, removed = 0, 0
417 for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
417 for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
418 # increment the appropriate counter according to _updatelfile's
418 # increment the appropriate counter according to _updatelfile's
419 # return value
419 # return value
420 updated += i > 0 and i or 0
420 updated += i > 0 and i or 0
421 removed -= i < 0 and i or 0
421 removed -= i < 0 and i or 0
422 if printmessage and (removed or updated) and not printed:
422 if printmessage and (removed or updated) and not printed:
423 ui.status(_('getting changed largefiles\n'))
423 ui.status(_('getting changed largefiles\n'))
424 printed = True
424 printed = True
425
425
426 lfdirstate.write()
426 lfdirstate.write()
427 if printed and printmessage:
427 if printed and printmessage:
428 ui.status(_('%d largefiles updated, %d removed\n') % (updated,
428 ui.status(_('%d largefiles updated, %d removed\n') % (updated,
429 removed))
429 removed))
430 finally:
430 finally:
431 wlock.release()
431 wlock.release()
432
432
433 def _updatelfile(repo, lfdirstate, lfile):
433 def _updatelfile(repo, lfdirstate, lfile):
434 '''updates a single largefile and copies the state of its standin from
434 '''updates a single largefile and copies the state of its standin from
435 the repository's dirstate to its state in the lfdirstate.
435 the repository's dirstate to its state in the lfdirstate.
436
436
437 returns 1 if the file was modified, -1 if the file was removed, 0 if the
437 returns 1 if the file was modified, -1 if the file was removed, 0 if the
438 file was unchanged, and None if the needed largefile was missing from the
438 file was unchanged, and None if the needed largefile was missing from the
439 cache.'''
439 cache.'''
440 ret = 0
440 ret = 0
441 abslfile = repo.wjoin(lfile)
441 abslfile = repo.wjoin(lfile)
442 absstandin = repo.wjoin(lfutil.standin(lfile))
442 absstandin = repo.wjoin(lfutil.standin(lfile))
443 if os.path.exists(absstandin):
443 if os.path.exists(absstandin):
444 if os.path.exists(absstandin+'.orig'):
444 if os.path.exists(absstandin+'.orig'):
445 shutil.copyfile(abslfile, abslfile+'.orig')
445 shutil.copyfile(abslfile, abslfile+'.orig')
446 expecthash = lfutil.readstandin(repo, lfile)
446 expecthash = lfutil.readstandin(repo, lfile)
447 if expecthash != '' and \
447 if expecthash != '' and \
448 (not os.path.exists(abslfile) or \
448 (not os.path.exists(abslfile) or \
449 expecthash != lfutil.hashfile(abslfile)):
449 expecthash != lfutil.hashfile(abslfile)):
450 if not lfutil.copyfromcache(repo, expecthash, lfile):
450 if not lfutil.copyfromcache(repo, expecthash, lfile):
451 return None # don't try to set the mode or update the dirstate
451 return None # don't try to set the mode or update the dirstate
452 ret = 1
452 ret = 1
453 mode = os.stat(absstandin).st_mode
453 mode = os.stat(absstandin).st_mode
454 if mode != os.stat(abslfile).st_mode:
454 if mode != os.stat(abslfile).st_mode:
455 os.chmod(abslfile, mode)
455 os.chmod(abslfile, mode)
456 ret = 1
456 ret = 1
457 else:
457 else:
458 if os.path.exists(abslfile):
458 if os.path.exists(abslfile):
459 os.unlink(abslfile)
459 os.unlink(abslfile)
460 ret = -1
460 ret = -1
461 state = repo.dirstate[lfutil.standin(lfile)]
461 state = repo.dirstate[lfutil.standin(lfile)]
462 if state == 'n':
462 if state == 'n':
463 lfdirstate.normal(lfile)
463 lfdirstate.normal(lfile)
464 elif state == 'r':
464 elif state == 'r':
465 lfdirstate.remove(lfile)
465 lfdirstate.remove(lfile)
466 elif state == 'a':
466 elif state == 'a':
467 lfdirstate.add(lfile)
467 lfdirstate.add(lfile)
468 elif state == '?':
468 elif state == '?':
469 try:
470 # Mercurial >= 1.9
471 lfdirstate.drop(lfile)
469 lfdirstate.drop(lfile)
472 except AttributeError:
473 # Mercurial <= 1.8
474 lfdirstate.forget(lfile)
475 return ret
470 return ret
476
471
477 # -- hg commands declarations ------------------------------------------------
472 # -- hg commands declarations ------------------------------------------------
478
473
479
474
480 cmdtable = {
475 cmdtable = {
481 'lfconvert': (lfconvert,
476 'lfconvert': (lfconvert,
482 [('s', 'size', 0, 'All files over this size (in megabytes) '
477 [('s', 'size', 0, 'All files over this size (in megabytes) '
483 'will be considered largefiles. This can also be specified '
478 'will be considered largefiles. This can also be specified '
484 'in your hgrc as [largefiles].size.'),
479 'in your hgrc as [largefiles].size.'),
485 ('','tonormal',False,
480 ('','tonormal',False,
486 'Convert from a largefiles repo to a normal repo')],
481 'Convert from a largefiles repo to a normal repo')],
487 _('hg lfconvert SOURCE DEST [FILE ...]')),
482 _('hg lfconvert SOURCE DEST [FILE ...]')),
488 }
483 }
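The second hunk applies the same cleanup to the utility module: import-time and attribute probes for pre-1.9 APIs (cmdutil.match, repo.add, discovery.findoutgoing, url.httpsendfile) give way to direct calls into the 1.9 interfaces (scmutil.match, repo[None].add, discovery.findcommonincoming, httpconnection.httpsendfile). A hedged sketch of the removed import-probing idiom, with both call signatures taken from the lines shown in the hunk and the wrapper name purely illustrative:

try:
    from mercurial import scmutil            # Mercurial >= 1.9
    HAVE_SCMUTIL = True
except ImportError:
    from mercurial import cmdutil            # Mercurial <= 1.8 path, removed here
    HAVE_SCMUTIL = False

def getmatcher_compat(repo, pats, opts):
    # the deleted shim chose the matcher API at call time;
    # after this changeset only the scmutil branch survives
    if HAVE_SCMUTIL:
        return scmutil.match(repo[None], pats, opts)
    return cmdutil.match(repo, pats, opts)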
@@ -1,493 +1,436 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import inspect
14 import shutil
13 import shutil
15 import stat
14 import stat
16 import hashlib
15 import hashlib
17
16
18 from mercurial import cmdutil, dirstate, httpconnection, match as match_, \
17 from mercurial import dirstate, httpconnection, match as match_, util
19 url as url_, util
20 from mercurial.i18n import _
18 from mercurial.i18n import _
21
19
22 try:
20 try:
23 from mercurial import scmutil
21 from mercurial import scmutil
24 except ImportError:
22 except ImportError:
25 pass
23 pass
26
24
27 shortname = '.hglf'
25 shortname = '.hglf'
28 longname = 'largefiles'
26 longname = 'largefiles'
29
27
30
28
31 # -- Portability wrappers ----------------------------------------------
29 # -- Portability wrappers ----------------------------------------------
32
30
33 if 'subrepos' in inspect.getargspec(dirstate.dirstate.status)[0]:
34 # for Mercurial >= 1.5
35 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
31 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
36 return dirstate.walk(matcher, [], unknown, ignored)
32 return dirstate.walk(matcher, [], unknown, ignored)
37 else:
38 # for Mercurial <= 1.4
39 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
40 return dirstate.walk(matcher, unknown, ignored)
41
33
42 def repo_add(repo, list):
34 def repo_add(repo, list):
43 try:
44 # Mercurial <= 1.5
45 add = repo.add
46 except AttributeError:
47 # Mercurial >= 1.6
48 add = repo[None].add
35 add = repo[None].add
49 return add(list)
36 return add(list)
50
37
51 def repo_remove(repo, list, unlink=False):
38 def repo_remove(repo, list, unlink=False):
52 try:
53 # Mercurial <= 1.5
54 remove = repo.remove
55 except AttributeError:
56 # Mercurial >= 1.6
57 try:
58 # Mercurial <= 1.8
59 remove = repo[None].remove
60 except AttributeError:
61 # Mercurial >= 1.9
62 def remove(list, unlink):
39 def remove(list, unlink):
63 wlock = repo.wlock()
40 wlock = repo.wlock()
64 try:
41 try:
65 if unlink:
42 if unlink:
66 for f in list:
43 for f in list:
67 try:
44 try:
68 util.unlinkpath(repo.wjoin(f))
45 util.unlinkpath(repo.wjoin(f))
69 except OSError, inst:
46 except OSError, inst:
70 if inst.errno != errno.ENOENT:
47 if inst.errno != errno.ENOENT:
71 raise
48 raise
72 repo[None].forget(list)
49 repo[None].forget(list)
73 finally:
50 finally:
74 wlock.release()
51 wlock.release()
75
76 return remove(list, unlink=unlink)
52 return remove(list, unlink=unlink)
77
53
78 def repo_forget(repo, list):
54 def repo_forget(repo, list):
79 try:
80 # Mercurial <= 1.5
81 forget = repo.forget
82 except AttributeError:
83 # Mercurial >= 1.6
84 forget = repo[None].forget
55 forget = repo[None].forget
85 return forget(list)
56 return forget(list)
86
57
87 def findoutgoing(repo, remote, force):
58 def findoutgoing(repo, remote, force):
88 # First attempt is for Mercurial <= 1.5 second is for >= 1.6
89 try:
90 return repo.findoutgoing(remote)
91 except AttributeError:
92 from mercurial import discovery
59 from mercurial import discovery
93 try:
94 # Mercurial <= 1.8
95 return discovery.findoutgoing(repo, remote, force=force)
96 except AttributeError:
97 # Mercurial >= 1.9
98 common, _anyinc, _heads = discovery.findcommonincoming(repo,
60 common, _anyinc, _heads = discovery.findcommonincoming(repo,
99 remote, force=force)
61 remote, force=force)
100 return repo.changelog.findmissing(common)
62 return repo.changelog.findmissing(common)
101
63
102 # -- Private worker functions ------------------------------------------
64 # -- Private worker functions ------------------------------------------
103
65
104 def link(src, dest):
66 def link(src, dest):
105 try:
67 try:
106 util.oslink(src, dest)
68 util.oslink(src, dest)
107 except OSError:
69 except OSError:
108 # If hardlinks fail fall back on copy
70 # If hardlinks fail fall back on copy
109 shutil.copyfile(src, dest)
71 shutil.copyfile(src, dest)
110 os.chmod(dest, os.stat(src).st_mode)
72 os.chmod(dest, os.stat(src).st_mode)
111
73
112 def systemcachepath(ui, hash):
74 def systemcachepath(ui, hash):
113 path = ui.config(longname, 'systemcache', None)
75 path = ui.config(longname, 'systemcache', None)
114 if path:
76 if path:
115 path = os.path.join(path, hash)
77 path = os.path.join(path, hash)
116 else:
78 else:
117 if os.name == 'nt':
79 if os.name == 'nt':
118 path = os.path.join(os.getenv('LOCALAPPDATA') or \
80 path = os.path.join(os.getenv('LOCALAPPDATA') or \
119 os.getenv('APPDATA'), longname, hash)
81 os.getenv('APPDATA'), longname, hash)
120 elif os.name == 'posix':
82 elif os.name == 'posix':
121 path = os.path.join(os.getenv('HOME'), '.' + longname, hash)
83 path = os.path.join(os.getenv('HOME'), '.' + longname, hash)
122 else:
84 else:
123 raise util.Abort(_('Unknown operating system: %s\n') % os.name)
85 raise util.Abort(_('Unknown operating system: %s\n') % os.name)
124 return path
86 return path
125
87
126 def insystemcache(ui, hash):
88 def insystemcache(ui, hash):
127 return os.path.exists(systemcachepath(ui, hash))
89 return os.path.exists(systemcachepath(ui, hash))
128
90
129 def findfile(repo, hash):
91 def findfile(repo, hash):
130 if incache(repo, hash):
92 if incache(repo, hash):
131 repo.ui.note(_('Found %s in cache\n') % hash)
93 repo.ui.note(_('Found %s in cache\n') % hash)
132 return cachepath(repo, hash)
94 return cachepath(repo, hash)
133 if insystemcache(repo.ui, hash):
95 if insystemcache(repo.ui, hash):
134 repo.ui.note(_('Found %s in system cache\n') % hash)
96 repo.ui.note(_('Found %s in system cache\n') % hash)
135 return systemcachepath(repo.ui, hash)
97 return systemcachepath(repo.ui, hash)
136 return None
98 return None
137
99
138 class largefiles_dirstate(dirstate.dirstate):
100 class largefiles_dirstate(dirstate.dirstate):
139 def __getitem__(self, key):
101 def __getitem__(self, key):
140 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
102 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
141 def normal(self, f):
103 def normal(self, f):
142 return super(largefiles_dirstate, self).normal(unixpath(f))
104 return super(largefiles_dirstate, self).normal(unixpath(f))
143 def remove(self, f):
105 def remove(self, f):
144 return super(largefiles_dirstate, self).remove(unixpath(f))
106 return super(largefiles_dirstate, self).remove(unixpath(f))
145 def add(self, f):
107 def add(self, f):
146 return super(largefiles_dirstate, self).add(unixpath(f))
108 return super(largefiles_dirstate, self).add(unixpath(f))
147 def drop(self, f):
109 def drop(self, f):
148 return super(largefiles_dirstate, self).drop(unixpath(f))
110 return super(largefiles_dirstate, self).drop(unixpath(f))
149 def forget(self, f):
111 def forget(self, f):
150 return super(largefiles_dirstate, self).forget(unixpath(f))
112 return super(largefiles_dirstate, self).forget(unixpath(f))
151
113
152 def openlfdirstate(ui, repo):
114 def openlfdirstate(ui, repo):
153 '''
115 '''
154 Return a dirstate object that tracks big files: i.e. its root is the
116 Return a dirstate object that tracks big files: i.e. its root is the
155 repo root, but it is saved in .hg/largefiles/dirstate.
117 repo root, but it is saved in .hg/largefiles/dirstate.
156 '''
118 '''
157 admin = repo.join(longname)
119 admin = repo.join(longname)
158 try:
159 # Mercurial >= 1.9
160 opener = scmutil.opener(admin)
120 opener = scmutil.opener(admin)
161 except ImportError:
162 # Mercurial <= 1.8
163 opener = util.opener(admin)
164 if util.safehasattr(repo.dirstate, '_validate'):
121 if util.safehasattr(repo.dirstate, '_validate'):
165 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
122 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
166 repo.dirstate._validate)
123 repo.dirstate._validate)
167 else:
124 else:
168 lfdirstate = largefiles_dirstate(opener, ui, repo.root)
125 lfdirstate = largefiles_dirstate(opener, ui, repo.root)
169
126
170 # If the largefiles dirstate does not exist, populate and create it. This
127 # If the largefiles dirstate does not exist, populate and create it. This
171 # ensures that we create it on the first meaningful largefiles operation in
128 # ensures that we create it on the first meaningful largefiles operation in
172 # a new clone. It also gives us an easy way to forcibly rebuild largefiles
129 # a new clone. It also gives us an easy way to forcibly rebuild largefiles
173 # state:
130 # state:
174 # rm .hg/largefiles/dirstate && hg status
131 # rm .hg/largefiles/dirstate && hg status
175 # Or even, if things are really messed up:
132 # Or even, if things are really messed up:
176 # rm -rf .hg/largefiles && hg status
133 # rm -rf .hg/largefiles && hg status
177 if not os.path.exists(os.path.join(admin, 'dirstate')):
134 if not os.path.exists(os.path.join(admin, 'dirstate')):
178 util.makedirs(admin)
135 util.makedirs(admin)
179 matcher = getstandinmatcher(repo)
136 matcher = getstandinmatcher(repo)
180 for standin in dirstate_walk(repo.dirstate, matcher):
137 for standin in dirstate_walk(repo.dirstate, matcher):
181 lfile = splitstandin(standin)
138 lfile = splitstandin(standin)
182 hash = readstandin(repo, lfile)
139 hash = readstandin(repo, lfile)
183 lfdirstate.normallookup(lfile)
140 lfdirstate.normallookup(lfile)
184 try:
141 try:
185 if hash == hashfile(lfile):
142 if hash == hashfile(lfile):
186 lfdirstate.normal(lfile)
143 lfdirstate.normal(lfile)
187 except IOError, err:
144 except IOError, err:
188 if err.errno != errno.ENOENT:
145 if err.errno != errno.ENOENT:
189 raise
146 raise
190
147
191 lfdirstate.write()
148 lfdirstate.write()
192
149
193 return lfdirstate
150 return lfdirstate
194
151
195 def lfdirstate_status(lfdirstate, repo, rev):
152 def lfdirstate_status(lfdirstate, repo, rev):
196 wlock = repo.wlock()
153 wlock = repo.wlock()
197 try:
154 try:
198 match = match_.always(repo.root, repo.getcwd())
155 match = match_.always(repo.root, repo.getcwd())
199 s = lfdirstate.status(match, [], False, False, False)
156 s = lfdirstate.status(match, [], False, False, False)
200 unsure, modified, added, removed, missing, unknown, ignored, clean = s
157 unsure, modified, added, removed, missing, unknown, ignored, clean = s
201 for lfile in unsure:
158 for lfile in unsure:
202 if repo[rev][standin(lfile)].data().strip() != \
159 if repo[rev][standin(lfile)].data().strip() != \
203 hashfile(repo.wjoin(lfile)):
160 hashfile(repo.wjoin(lfile)):
204 modified.append(lfile)
161 modified.append(lfile)
205 else:
162 else:
206 clean.append(lfile)
163 clean.append(lfile)
207 lfdirstate.normal(lfile)
164 lfdirstate.normal(lfile)
208 lfdirstate.write()
165 lfdirstate.write()
209 finally:
166 finally:
210 wlock.release()
167 wlock.release()
211 return (modified, added, removed, missing, unknown, ignored, clean)
168 return (modified, added, removed, missing, unknown, ignored, clean)
212
169
213 def listlfiles(repo, rev=None, matcher=None):
170 def listlfiles(repo, rev=None, matcher=None):
214 '''list largefiles in the working copy or specified changeset'''
171 '''list largefiles in the working copy or specified changeset'''
215
172
216 if matcher is None:
173 if matcher is None:
217 matcher = getstandinmatcher(repo)
174 matcher = getstandinmatcher(repo)
218
175
219 # ignore unknown files in working directory
176 # ignore unknown files in working directory
220 return [splitstandin(f) for f in repo[rev].walk(matcher) \
177 return [splitstandin(f) for f in repo[rev].walk(matcher) \
221 if rev is not None or repo.dirstate[f] != '?']
178 if rev is not None or repo.dirstate[f] != '?']
222
179
223 def incache(repo, hash):
180 def incache(repo, hash):
224 return os.path.exists(cachepath(repo, hash))
181 return os.path.exists(cachepath(repo, hash))
225
182
226 def createdir(dir):
183 def createdir(dir):
227 if not os.path.exists(dir):
184 if not os.path.exists(dir):
228 os.makedirs(dir)
185 os.makedirs(dir)
229
186
230 def cachepath(repo, hash):
187 def cachepath(repo, hash):
231 return repo.join(os.path.join(longname, hash))
188 return repo.join(os.path.join(longname, hash))
232
189
233 def copyfromcache(repo, hash, filename):
190 def copyfromcache(repo, hash, filename):
234 '''copyfromcache copies the specified largefile from the repo or system
191 '''copyfromcache copies the specified largefile from the repo or system
235 cache to the specified location in the repository. It will not throw an
192 cache to the specified location in the repository. It will not throw an
236 exception on failure, as it is meant to be called only after ensuring that
193 exception on failure, as it is meant to be called only after ensuring that
237 the needed largefile exists in the cache.'''
194 the needed largefile exists in the cache.'''
238 path = findfile(repo, hash)
195 path = findfile(repo, hash)
239 if path is None:
196 if path is None:
240 return False
197 return False
241 util.makedirs(os.path.dirname(repo.wjoin(filename)))
198 util.makedirs(os.path.dirname(repo.wjoin(filename)))
242 shutil.copy(path, repo.wjoin(filename))
199 shutil.copy(path, repo.wjoin(filename))
243 return True
200 return True
244
201
245 def copytocache(repo, rev, file, uploaded=False):
202 def copytocache(repo, rev, file, uploaded=False):
246 hash = readstandin(repo, file)
203 hash = readstandin(repo, file)
247 if incache(repo, hash):
204 if incache(repo, hash):
248 return
205 return
249 copytocacheabsolute(repo, repo.wjoin(file), hash)
206 copytocacheabsolute(repo, repo.wjoin(file), hash)
250
207
251 def copytocacheabsolute(repo, file, hash):
208 def copytocacheabsolute(repo, file, hash):
252 createdir(os.path.dirname(cachepath(repo, hash)))
209 createdir(os.path.dirname(cachepath(repo, hash)))
253 if insystemcache(repo.ui, hash):
210 if insystemcache(repo.ui, hash):
254 link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
211 link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
255 else:
212 else:
256 shutil.copyfile(file, cachepath(repo, hash))
213 shutil.copyfile(file, cachepath(repo, hash))
257 os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
214 os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
258 linktosystemcache(repo, hash)
215 linktosystemcache(repo, hash)
259
216
260 def linktosystemcache(repo, hash):
217 def linktosystemcache(repo, hash):
261 createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
218 createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
262 link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
219 link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
263
220
264 def getstandinmatcher(repo, pats=[], opts={}):
221 def getstandinmatcher(repo, pats=[], opts={}):
265 '''Return a match object that applies pats to the standin directory'''
222 '''Return a match object that applies pats to the standin directory'''
266 standindir = repo.pathto(shortname)
223 standindir = repo.pathto(shortname)
267 if pats:
224 if pats:
268 # patterns supplied: search standin directory relative to current dir
225 # patterns supplied: search standin directory relative to current dir
269 cwd = repo.getcwd()
226 cwd = repo.getcwd()
270 if os.path.isabs(cwd):
227 if os.path.isabs(cwd):
271 # cwd is an absolute path for hg -R <reponame>
228 # cwd is an absolute path for hg -R <reponame>
272 # work relative to the repository root in this case
229 # work relative to the repository root in this case
273 cwd = ''
230 cwd = ''
274 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
231 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
275 elif os.path.isdir(standindir):
232 elif os.path.isdir(standindir):
276 # no patterns: relative to repo root
233 # no patterns: relative to repo root
277 pats = [standindir]
234 pats = [standindir]
278 else:
235 else:
279 # no patterns and no standin dir: return matcher that matches nothing
236 # no patterns and no standin dir: return matcher that matches nothing
280 match = match_.match(repo.root, None, [], exact=True)
237 match = match_.match(repo.root, None, [], exact=True)
281 match.matchfn = lambda f: False
238 match.matchfn = lambda f: False
282 return match
239 return match
283 return getmatcher(repo, pats, opts, showbad=False)
240 return getmatcher(repo, pats, opts, showbad=False)
284
241
285 def getmatcher(repo, pats=[], opts={}, showbad=True):
242 def getmatcher(repo, pats=[], opts={}, showbad=True):
286 '''Wrapper around scmutil.match() that adds showbad: if false, neuter
243 '''Wrapper around scmutil.match() that adds showbad: if false, neuter
287 the match object\'s bad() method so it does not print any warnings
244 the match object\'s bad() method so it does not print any warnings
288 about missing files or directories.'''
245 about missing files or directories.'''
289 try:
290 # Mercurial >= 1.9
291 match = scmutil.match(repo[None], pats, opts)
246 match = scmutil.match(repo[None], pats, opts)
292 except ImportError:
293 # Mercurial <= 1.8
294 match = cmdutil.match(repo, pats, opts)
295
247
296 if not showbad:
248 if not showbad:
297 match.bad = lambda f, msg: None
249 match.bad = lambda f, msg: None
298 return match
250 return match
299
251
300 def composestandinmatcher(repo, rmatcher):
252 def composestandinmatcher(repo, rmatcher):
301 '''Return a matcher that accepts standins corresponding to the files
253 '''Return a matcher that accepts standins corresponding to the files
302 accepted by rmatcher. Pass the list of files in the matcher as the
254 accepted by rmatcher. Pass the list of files in the matcher as the
303 paths specified by the user.'''
255 paths specified by the user.'''
304 smatcher = getstandinmatcher(repo, rmatcher.files())
256 smatcher = getstandinmatcher(repo, rmatcher.files())
305 isstandin = smatcher.matchfn
257 isstandin = smatcher.matchfn
306 def composed_matchfn(f):
258 def composed_matchfn(f):
307 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
259 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
308 smatcher.matchfn = composed_matchfn
260 smatcher.matchfn = composed_matchfn
309
261
310 return smatcher
262 return smatcher
311
263
312 def standin(filename):
264 def standin(filename):
313 '''Return the repo-relative path to the standin for the specified big
265 '''Return the repo-relative path to the standin for the specified big
314 file.'''
266 file.'''
315 # Notes:
267 # Notes:
316 # 1) Most callers want an absolute path, but _create_standin() needs
268 # 1) Most callers want an absolute path, but _create_standin() needs
317 # it repo-relative so lfadd() can pass it to repo_add(). So leave
269 # it repo-relative so lfadd() can pass it to repo_add(). So leave
318 # it up to the caller to use repo.wjoin() to get an absolute path.
270 # it up to the caller to use repo.wjoin() to get an absolute path.
319 # 2) Join with '/' because that's what dirstate always uses, even on
271 # 2) Join with '/' because that's what dirstate always uses, even on
320 # Windows. Change existing separator to '/' first in case we are
272 # Windows. Change existing separator to '/' first in case we are
321 # passed filenames from an external source (like the command line).
273 # passed filenames from an external source (like the command line).
322 return shortname + '/' + filename.replace(os.sep, '/')
274 return shortname + '/' + filename.replace(os.sep, '/')
323
275
324 def isstandin(filename):
276 def isstandin(filename):
325 '''Return true if filename is a big file standin. filename must
277 '''Return true if filename is a big file standin. filename must
326 be in Mercurial\'s internal form (slash-separated).'''
278 be in Mercurial\'s internal form (slash-separated).'''
327 return filename.startswith(shortname + '/')
279 return filename.startswith(shortname + '/')
328
280
329 def splitstandin(filename):
281 def splitstandin(filename):
330 # Split on / because that's what dirstate always uses, even on Windows.
282 # Split on / because that's what dirstate always uses, even on Windows.
331 # Change local separator to / first just in case we are passed filenames
283 # Change local separator to / first just in case we are passed filenames
332 # from an external source (like the command line).
284 # from an external source (like the command line).
333 bits = filename.replace(os.sep, '/').split('/', 1)
285 bits = filename.replace(os.sep, '/').split('/', 1)
334 if len(bits) == 2 and bits[0] == shortname:
286 if len(bits) == 2 and bits[0] == shortname:
335 return bits[1]
287 return bits[1]
336 else:
288 else:
337 return None
289 return None
338
290
339 def updatestandin(repo, standin):
291 def updatestandin(repo, standin):
340 file = repo.wjoin(splitstandin(standin))
292 file = repo.wjoin(splitstandin(standin))
341 if os.path.exists(file):
293 if os.path.exists(file):
342 hash = hashfile(file)
294 hash = hashfile(file)
343 executable = getexecutable(file)
295 executable = getexecutable(file)
344 writestandin(repo, standin, hash, executable)
296 writestandin(repo, standin, hash, executable)
345
297
346 def readstandin(repo, filename, node=None):
298 def readstandin(repo, filename, node=None):
347 '''read hex hash from standin for filename at given node, or working
299 '''read hex hash from standin for filename at given node, or working
348 directory if no node is given'''
300 directory if no node is given'''
349 return repo[node][standin(filename)].data().strip()
301 return repo[node][standin(filename)].data().strip()
350
302
351 def writestandin(repo, standin, hash, executable):
303 def writestandin(repo, standin, hash, executable):
352 '''write hash to <repo.root>/<standin>'''
304 '''write hash to <repo.root>/<standin>'''
353 writehash(hash, repo.wjoin(standin), executable)
305 writehash(hash, repo.wjoin(standin), executable)
354
306
355 def copyandhash(instream, outfile):
307 def copyandhash(instream, outfile):
356 '''Read bytes from instream (iterable) and write them to outfile,
308 '''Read bytes from instream (iterable) and write them to outfile,
357 computing the SHA-1 hash of the data along the way. Close outfile
309 computing the SHA-1 hash of the data along the way. Close outfile
358 when done and return the binary hash.'''
310 when done and return the binary hash.'''
359 hasher = util.sha1('')
311 hasher = util.sha1('')
360 for data in instream:
312 for data in instream:
361 hasher.update(data)
313 hasher.update(data)
362 outfile.write(data)
314 outfile.write(data)
363
315
364 # Blecch: closing a file that somebody else opened is rude and
316 # Blecch: closing a file that somebody else opened is rude and
365 # wrong. But it's so darn convenient and practical! After all,
317 # wrong. But it's so darn convenient and practical! After all,
366 # outfile was opened just to copy and hash.
318 # outfile was opened just to copy and hash.
367 outfile.close()
319 outfile.close()
368
320
369 return hasher.digest()
321 return hasher.digest()
370
322
371 def hashrepofile(repo, file):
323 def hashrepofile(repo, file):
372 return hashfile(repo.wjoin(file))
324 return hashfile(repo.wjoin(file))
373
325
374 def hashfile(file):
326 def hashfile(file):
375 if not os.path.exists(file):
327 if not os.path.exists(file):
376 return ''
328 return ''
377 hasher = util.sha1('')
329 hasher = util.sha1('')
378 fd = open(file, 'rb')
330 fd = open(file, 'rb')
379 for data in blockstream(fd):
331 for data in blockstream(fd):
380 hasher.update(data)
332 hasher.update(data)
381 fd.close()
333 fd.close()
382 return hasher.hexdigest()
334 return hasher.hexdigest()
383
335
384 class limitreader(object):
336 class limitreader(object):
385 def __init__(self, f, limit):
337 def __init__(self, f, limit):
386 self.f = f
338 self.f = f
387 self.limit = limit
339 self.limit = limit
388
340
389 def read(self, length):
341 def read(self, length):
390 if self.limit == 0:
342 if self.limit == 0:
391 return ''
343 return ''
392 length = length > self.limit and self.limit or length
344 length = length > self.limit and self.limit or length
393 self.limit -= length
345 self.limit -= length
394 return self.f.read(length)
346 return self.f.read(length)
395
347
396 def close(self):
348 def close(self):
397 pass
349 pass
398
350
399 def blockstream(infile, blocksize=128 * 1024):
351 def blockstream(infile, blocksize=128 * 1024):
400 """Generator that yields blocks of data from infile and closes infile."""
352 """Generator that yields blocks of data from infile and closes infile."""
401 while True:
353 while True:
402 data = infile.read(blocksize)
354 data = infile.read(blocksize)
403 if not data:
355 if not data:
404 break
356 break
405 yield data
357 yield data
406 # Same blecch as above.
358 # Same blecch as above.
407 infile.close()
359 infile.close()
408
360
409 def readhash(filename):
361 def readhash(filename):
410 rfile = open(filename, 'rb')
362 rfile = open(filename, 'rb')
411 hash = rfile.read(40)
363 hash = rfile.read(40)
412 rfile.close()
364 rfile.close()
413 if len(hash) < 40:
365 if len(hash) < 40:
414 raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
366 raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
415 % (filename, len(hash)))
367 % (filename, len(hash)))
416 return hash
368 return hash
417
369
418 def writehash(hash, filename, executable):
370 def writehash(hash, filename, executable):
419 util.makedirs(os.path.dirname(filename))
371 util.makedirs(os.path.dirname(filename))
420 if os.path.exists(filename):
372 if os.path.exists(filename):
421 os.unlink(filename)
373 os.unlink(filename)
422 wfile = open(filename, 'wb')
374 wfile = open(filename, 'wb')
423
375
424 try:
376 try:
425 wfile.write(hash)
377 wfile.write(hash)
426 wfile.write('\n')
378 wfile.write('\n')
427 finally:
379 finally:
428 wfile.close()
380 wfile.close()
429 if os.path.exists(filename):
381 if os.path.exists(filename):
430 os.chmod(filename, getmode(executable))
382 os.chmod(filename, getmode(executable))
431
383
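readhash and writehash define the on-disk standin format: a 40-character hex SHA-1 digest followed by a newline, with the executable bit mirrored onto the standin's file mode. A self-contained sketch of that round-trip (the paths and helper names here are made up for illustration):

import hashlib
import os
import tempfile

def write_standin(path, hexhash):
    # A standin is just the 40-character hex digest plus a trailing newline.
    with open(path, 'wb') as f:
        f.write(hexhash.encode('ascii') + b'\n')

def read_standin(path):
    with open(path, 'rb') as f:
        hexhash = f.read(40)
    if len(hexhash) < 40:
        raise ValueError('bad hash in %r (only %d bytes long)'
                         % (path, len(hexhash)))
    return hexhash.decode('ascii')

digest = hashlib.sha1(b'big binary payload').hexdigest()
path = os.path.join(tempfile.mkdtemp(), 'standin')
write_standin(path, digest)
assert read_standin(path) == digest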
432 def getexecutable(filename):
384 def getexecutable(filename):
433 mode = os.stat(filename).st_mode
385 mode = os.stat(filename).st_mode
434 return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and (mode & \
386 return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and (mode & \
435 stat.S_IXOTH)
387 stat.S_IXOTH)
436
388
437 def getmode(executable):
389 def getmode(executable):
438 if executable:
390 if executable:
439 return 0755
391 return 0755
440 else:
392 else:
441 return 0644
393 return 0644
442
394
443 def urljoin(first, second, *arg):
395 def urljoin(first, second, *arg):
444 def join(left, right):
396 def join(left, right):
445 if not left.endswith('/'):
397 if not left.endswith('/'):
446 left += '/'
398 left += '/'
447 if right.startswith('/'):
399 if right.startswith('/'):
448 right = right[1:]
400 right = right[1:]
449 return left + right
401 return left + right
450
402
451 url = join(first, second)
403 url = join(first, second)
452 for a in arg:
404 for a in arg:
453 url = join(url, a)
405 url = join(url, a)
454 return url
406 return url
455
407
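urljoin ensures exactly one slash separates each pair of path segments, regardless of whether the caller supplied trailing or leading slashes. A quick standalone restatement of that behaviour (not the extension's code, just an illustration):

def join_url(first, second, *rest):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right
    url = join(first, second)
    for part in rest:
        url = join(url, part)
    return url

# Exactly one slash ends up between each pair of segments.
assert join_url('http://example.com', 'largefiles') == 'http://example.com/largefiles'
assert (join_url('http://example.com/', '/largefiles/', '/abc')
        == 'http://example.com/largefiles/abc')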
456 def hexsha1(data):
408 def hexsha1(data):
457 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
409 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
458 object data"""
410 object data"""
459 h = hashlib.sha1()
411 h = hashlib.sha1()
460 for chunk in util.filechunkiter(data):
412 for chunk in util.filechunkiter(data):
461 h.update(chunk)
413 h.update(chunk)
462 return h.hexdigest()
414 return h.hexdigest()
463
415
464 def httpsendfile(ui, filename):
416 def httpsendfile(ui, filename):
465 try:
466 # Mercurial >= 1.9
467 return httpconnection.httpsendfile(ui, filename, 'rb')
417 return httpconnection.httpsendfile(ui, filename, 'rb')
468 except ImportError:
469 if 'ui' in inspect.getargspec(url_.httpsendfile.__init__)[0]:
470 # Mercurial == 1.8
471 return url_.httpsendfile(ui, filename, 'rb')
472 else:
473 # Mercurial <= 1.7
474 return url_.httpsendfile(filename, 'rb')
475
418
476 # Convert a path to a unix style path. This is used to give a
419 # Convert a path to a unix style path. This is used to give a
477 # canonical path to the lfdirstate.
420 # canonical path to the lfdirstate.
478 def unixpath(path):
421 def unixpath(path):
479 return os.path.normpath(path).replace(os.sep, '/')
422 return os.path.normpath(path).replace(os.sep, '/')
480
423
481 def islfilesrepo(repo):
424 def islfilesrepo(repo):
482 return ('largefiles' in repo.requirements and
425 return ('largefiles' in repo.requirements and
483 any_(shortname + '/' in f[0] for f in repo.store.datafiles()))
426 any_(shortname + '/' in f[0] for f in repo.store.datafiles()))
484
427
485 def any_(gen):
428 def any_(gen):
486 for x in gen:
429 for x in gen:
487 if x:
430 if x:
488 return True
431 return True
489 return False
432 return False
490
433
491 class storeprotonotcapable(BaseException):
434 class storeprotonotcapable(BaseException):
492 def __init__(self, storetypes):
435 def __init__(self, storetypes):
493 self.storetypes = storetypes
436 self.storetypes = storetypes
@@ -1,904 +1,830 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, match as match_, node, \
14 from mercurial import hg, commands, util, cmdutil, match as match_, node, \
15 archival, error, merge
15 archival, error, merge
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 try:
20 try:
21 from mercurial import scmutil
21 from mercurial import scmutil
22 except ImportError:
22 except ImportError:
23 pass
23 pass
24
24
25 import lfutil
25 import lfutil
26 import lfcommands
26 import lfcommands
27
27
28 def installnormalfilesmatchfn(manifest):
28 def installnormalfilesmatchfn(manifest):
29 '''overrides scmutil.match so that the matcher it returns will ignore all
29 '''overrides scmutil.match so that the matcher it returns will ignore all
30 largefiles'''
30 largefiles'''
31 oldmatch = None # for the closure
31 oldmatch = None # for the closure
32 def override_match(repo, pats=[], opts={}, globbed=False,
32 def override_match(repo, pats=[], opts={}, globbed=False,
33 default='relpath'):
33 default='relpath'):
34 match = oldmatch(repo, pats, opts, globbed, default)
34 match = oldmatch(repo, pats, opts, globbed, default)
35 m = copy.copy(match)
35 m = copy.copy(match)
36 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
36 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
37 manifest)
37 manifest)
38 m._files = filter(notlfile, m._files)
38 m._files = filter(notlfile, m._files)
39 m._fmap = set(m._files)
39 m._fmap = set(m._files)
40 orig_matchfn = m.matchfn
40 orig_matchfn = m.matchfn
41 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
41 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
42 return m
42 return m
43 oldmatch = installmatchfn(override_match)
43 oldmatch = installmatchfn(override_match)
44
44
45 def installmatchfn(f):
45 def installmatchfn(f):
46 try:
47 # Mercurial >= 1.9
48 oldmatch = scmutil.match
46 oldmatch = scmutil.match
49 except ImportError:
50 # Mercurial <= 1.8
51 oldmatch = cmdutil.match
52 setattr(f, 'oldmatch', oldmatch)
47 setattr(f, 'oldmatch', oldmatch)
53 try:
54 # Mercurial >= 1.9
55 scmutil.match = f
48 scmutil.match = f
56 except ImportError:
57 # Mercurial <= 1.8
58 cmdutil.match = f
59 return oldmatch
49 return oldmatch
60
50
61 def restorematchfn():
51 def restorematchfn():
62 '''restores scmutil.match to what it was before installnormalfilesmatchfn
52 '''restores scmutil.match to what it was before installnormalfilesmatchfn
63 was called. no-op if scmutil.match is its original function.
53 was called. no-op if scmutil.match is its original function.
64
54
65 Note that n calls to installnormalfilesmatchfn will require n calls to
55 Note that n calls to installnormalfilesmatchfn will require n calls to
66 restore matchfn to reverse'''
56 restore matchfn to reverse'''
67 try:
68 # Mercurial >= 1.9
69 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
57 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
70 except ImportError:
71 # Mercurial <= 1.8
72 cmdutil.match = getattr(cmdutil.match, 'oldmatch', cmdutil.match)
73
58
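installmatchfn and restorematchfn implement a small install/restore protocol for monkey-patching scmutil.match: the replacement function carries a reference to the function it displaced, so restoring is just reading that attribute back (and nested installs therefore need matching restores). A generic sketch of the pattern against a stand-in namespace object rather than scmutil (all names below are illustrative):

class Namespace(object):
    '''Stands in for the scmutil module in this illustration.'''

scm = Namespace()

def original_match(pattern):
    return 'original match of %s' % pattern

scm.match = original_match

def installmatchfn(f):
    oldmatch = scm.match
    setattr(f, 'oldmatch', oldmatch)   # remember what was displaced
    scm.match = f
    return oldmatch

def restorematchfn():
    # A no-op if scm.match was never patched: getattr falls back to itself.
    scm.match = getattr(scm.match, 'oldmatch', scm.match)

def override_match(pattern):
    return 'filtered ' + override_match.oldmatch(pattern)

installmatchfn(override_match)
assert scm.match('*.bin') == 'filtered original match of *.bin'
restorematchfn()
assert scm.match('*.bin') == 'original match of *.bin'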
74 # -- Wrappers: modify existing commands --------------------------------
59 # -- Wrappers: modify existing commands --------------------------------
75
60
76 # Add works by going through the files that the user wanted to add
61 # Add works by going through the files that the user wanted to add
77 # and checking if they should be added as lfiles. Then it makes a new
62 # and checking if they should be added as lfiles. Then it makes a new
78 # matcher which matches only the normal files and runs the original
63 # matcher which matches only the normal files and runs the original
79 # version of add.
64 # version of add.
80 def override_add(orig, ui, repo, *pats, **opts):
65 def override_add(orig, ui, repo, *pats, **opts):
81 large = opts.pop('large', None)
66 large = opts.pop('large', None)
82
67
83 lfsize = opts.pop('lfsize', None)
68 lfsize = opts.pop('lfsize', None)
84 if not lfsize and lfutil.islfilesrepo(repo):
69 if not lfsize and lfutil.islfilesrepo(repo):
85 lfsize = ui.config(lfutil.longname, 'size', default='10')
70 lfsize = ui.config(lfutil.longname, 'size', default='10')
86 if lfsize:
71 if lfsize:
87 try:
72 try:
88 lfsize = int(lfsize)
73 lfsize = int(lfsize)
89 except ValueError:
74 except ValueError:
90 raise util.Abort(_('largefiles: size must be an integer, was %s\n')
75 raise util.Abort(_('largefiles: size must be an integer, was %s\n')
91 % lfsize)
76 % lfsize)
92
77
93 lfmatcher = None
78 lfmatcher = None
94 if os.path.exists(repo.wjoin(lfutil.shortname)):
79 if os.path.exists(repo.wjoin(lfutil.shortname)):
95 lfpats = ui.config(lfutil.longname, 'patterns', default=())
80 lfpats = ui.config(lfutil.longname, 'patterns', default=())
96 if lfpats:
81 if lfpats:
97 lfpats = lfpats.split(' ')
82 lfpats = lfpats.split(' ')
98 lfmatcher = match_.match(repo.root, '', list(lfpats))
83 lfmatcher = match_.match(repo.root, '', list(lfpats))
99
84
100 lfnames = []
85 lfnames = []
101 try:
102 # Mercurial >= 1.9
103 m = scmutil.match(repo[None], pats, opts)
86 m = scmutil.match(repo[None], pats, opts)
104 except ImportError:
105 # Mercurial <= 1.8
106 m = cmdutil.match(repo, pats, opts)
107 m.bad = lambda x, y: None
87 m.bad = lambda x, y: None
108 wctx = repo[None]
88 wctx = repo[None]
109 for f in repo.walk(m):
89 for f in repo.walk(m):
110 exact = m.exact(f)
90 exact = m.exact(f)
111 lfile = lfutil.standin(f) in wctx
91 lfile = lfutil.standin(f) in wctx
112 nfile = f in wctx
92 nfile = f in wctx
113 exists = lfile or nfile
93 exists = lfile or nfile
114
94
115 # Don't warn the user when they attempt to add a normal tracked file.
95 # Don't warn the user when they attempt to add a normal tracked file.
116 # The normal add code will do that for us.
96 # The normal add code will do that for us.
117 if exact and exists:
97 if exact and exists:
118 if lfile:
98 if lfile:
119 ui.warn(_('%s already a largefile\n') % f)
99 ui.warn(_('%s already a largefile\n') % f)
120 continue
100 continue
121
101
122 if exact or not exists:
102 if exact or not exists:
123 if large or (lfsize and os.path.getsize(repo.wjoin(f)) >= \
103 if large or (lfsize and os.path.getsize(repo.wjoin(f)) >= \
124 lfsize * 1024 * 1024) or (lfmatcher and lfmatcher(f)):
104 lfsize * 1024 * 1024) or (lfmatcher and lfmatcher(f)):
125 lfnames.append(f)
105 lfnames.append(f)
126 if ui.verbose or not exact:
106 if ui.verbose or not exact:
127 ui.status(_('adding %s as a largefile\n') % m.rel(f))
107 ui.status(_('adding %s as a largefile\n') % m.rel(f))
128
108
129 bad = []
109 bad = []
130 standins = []
110 standins = []
131
111
132 # Need to lock, otherwise there could be a race condition between when
112 # Need to lock, otherwise there could be a race condition between when
133 # standins are created and added to the repo
113 # standins are created and added to the repo
134 wlock = repo.wlock()
114 wlock = repo.wlock()
135 try:
115 try:
136 if not opts.get('dry_run'):
116 if not opts.get('dry_run'):
137 lfdirstate = lfutil.openlfdirstate(ui, repo)
117 lfdirstate = lfutil.openlfdirstate(ui, repo)
138 for f in lfnames:
118 for f in lfnames:
139 standinname = lfutil.standin(f)
119 standinname = lfutil.standin(f)
140 lfutil.writestandin(repo, standinname, hash='',
120 lfutil.writestandin(repo, standinname, hash='',
141 executable=lfutil.getexecutable(repo.wjoin(f)))
121 executable=lfutil.getexecutable(repo.wjoin(f)))
142 standins.append(standinname)
122 standins.append(standinname)
143 if lfdirstate[f] == 'r':
123 if lfdirstate[f] == 'r':
144 lfdirstate.normallookup(f)
124 lfdirstate.normallookup(f)
145 else:
125 else:
146 lfdirstate.add(f)
126 lfdirstate.add(f)
147 lfdirstate.write()
127 lfdirstate.write()
148 bad += [lfutil.splitstandin(f) for f in lfutil.repo_add(repo,
128 bad += [lfutil.splitstandin(f) for f in lfutil.repo_add(repo,
149 standins) if f in m.files()]
129 standins) if f in m.files()]
150 finally:
130 finally:
151 wlock.release()
131 wlock.release()
152
132
153 installnormalfilesmatchfn(repo[None].manifest())
133 installnormalfilesmatchfn(repo[None].manifest())
154 result = orig(ui, repo, *pats, **opts)
134 result = orig(ui, repo, *pats, **opts)
155 restorematchfn()
135 restorematchfn()
156
136
157 return (result == 1 or bad) and 1 or 0
137 return (result == 1 or bad) and 1 or 0
158
138
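The heart of override_add is the decision of whether a newly added file becomes a largefile: the explicit --large flag wins, otherwise a configured size threshold in megabytes or an optional pattern matcher can promote it. A reduced sketch of just that predicate (fnmatch here is a stand-in for Mercurial's match objects; the name wants_largefile is invented):

import fnmatch

def wants_largefile(filename, size_in_bytes, explicit_large=False,
                    lfsize_mb=10, patterns=()):
    # --large forces it; otherwise the size threshold (in MB) or a
    # configured pattern can promote the file to a largefile.
    if explicit_large:
        return True
    if lfsize_mb and size_in_bytes >= lfsize_mb * 1024 * 1024:
        return True
    return any(fnmatch.fnmatch(filename, pat) for pat in patterns)

assert wants_largefile('video.mp4', 5 * 1024 * 1024, lfsize_mb=None,
                       patterns=['*.mp4'])
assert wants_largefile('data.bin', 11 * 1024 * 1024)
assert not wants_largefile('readme.txt', 2 * 1024)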
159 def override_remove(orig, ui, repo, *pats, **opts):
139 def override_remove(orig, ui, repo, *pats, **opts):
160 manifest = repo[None].manifest()
140 manifest = repo[None].manifest()
161 installnormalfilesmatchfn(manifest)
141 installnormalfilesmatchfn(manifest)
162 orig(ui, repo, *pats, **opts)
142 orig(ui, repo, *pats, **opts)
163 restorematchfn()
143 restorematchfn()
164
144
165 after, force = opts.get('after'), opts.get('force')
145 after, force = opts.get('after'), opts.get('force')
166 if not pats and not after:
146 if not pats and not after:
167 raise util.Abort(_('no files specified'))
147 raise util.Abort(_('no files specified'))
168 try:
169 # Mercurial >= 1.9
170 m = scmutil.match(repo[None], pats, opts)
148 m = scmutil.match(repo[None], pats, opts)
171 except ImportError:
172 # Mercurial <= 1.8
173 m = cmdutil.match(repo, pats, opts)
174 try:
149 try:
175 repo.lfstatus = True
150 repo.lfstatus = True
176 s = repo.status(match=m, clean=True)
151 s = repo.status(match=m, clean=True)
177 finally:
152 finally:
178 repo.lfstatus = False
153 repo.lfstatus = False
179 modified, added, deleted, clean = [[f for f in list if lfutil.standin(f) \
154 modified, added, deleted, clean = [[f for f in list if lfutil.standin(f) \
180 in manifest] for list in [s[0], s[1], s[3], s[6]]]
155 in manifest] for list in [s[0], s[1], s[3], s[6]]]
181
156
182 def warn(files, reason):
157 def warn(files, reason):
183 for f in files:
158 for f in files:
184 ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
159 ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
185 % (m.rel(f), reason))
160 % (m.rel(f), reason))
186
161
187 if force:
162 if force:
188 remove, forget = modified + deleted + clean, added
163 remove, forget = modified + deleted + clean, added
189 elif after:
164 elif after:
190 remove, forget = deleted, []
165 remove, forget = deleted, []
191 warn(modified + added + clean, _('still exists'))
166 warn(modified + added + clean, _('still exists'))
192 else:
167 else:
193 remove, forget = deleted + clean, []
168 remove, forget = deleted + clean, []
194 warn(modified, _('is modified'))
169 warn(modified, _('is modified'))
195 warn(added, _('has been marked for add'))
170 warn(added, _('has been marked for add'))
196
171
197 for f in sorted(remove + forget):
172 for f in sorted(remove + forget):
198 if ui.verbose or not m.exact(f):
173 if ui.verbose or not m.exact(f):
199 ui.status(_('removing %s\n') % m.rel(f))
174 ui.status(_('removing %s\n') % m.rel(f))
200
175
201 # Need to lock because standin files are deleted then removed from the
176 # Need to lock because standin files are deleted then removed from the
202 # repository and we could race in between.
177 # repository and we could race in between.
203 wlock = repo.wlock()
178 wlock = repo.wlock()
204 try:
179 try:
205 lfdirstate = lfutil.openlfdirstate(ui, repo)
180 lfdirstate = lfutil.openlfdirstate(ui, repo)
206 for f in remove:
181 for f in remove:
207 if not after:
182 if not after:
208 os.unlink(repo.wjoin(f))
183 os.unlink(repo.wjoin(f))
209 currentdir = os.path.split(f)[0]
184 currentdir = os.path.split(f)[0]
210 while currentdir and not os.listdir(repo.wjoin(currentdir)):
185 while currentdir and not os.listdir(repo.wjoin(currentdir)):
211 os.rmdir(repo.wjoin(currentdir))
186 os.rmdir(repo.wjoin(currentdir))
212 currentdir = os.path.split(currentdir)[0]
187 currentdir = os.path.split(currentdir)[0]
213 lfdirstate.remove(f)
188 lfdirstate.remove(f)
214 lfdirstate.write()
189 lfdirstate.write()
215
190
216 forget = [lfutil.standin(f) for f in forget]
191 forget = [lfutil.standin(f) for f in forget]
217 remove = [lfutil.standin(f) for f in remove]
192 remove = [lfutil.standin(f) for f in remove]
218 lfutil.repo_forget(repo, forget)
193 lfutil.repo_forget(repo, forget)
219 lfutil.repo_remove(repo, remove, unlink=True)
194 lfutil.repo_remove(repo, remove, unlink=True)
220 finally:
195 finally:
221 wlock.release()
196 wlock.release()
222
197
223 def override_status(orig, ui, repo, *pats, **opts):
198 def override_status(orig, ui, repo, *pats, **opts):
224 try:
199 try:
225 repo.lfstatus = True
200 repo.lfstatus = True
226 return orig(ui, repo, *pats, **opts)
201 return orig(ui, repo, *pats, **opts)
227 finally:
202 finally:
228 repo.lfstatus = False
203 repo.lfstatus = False
229
204
230 def override_log(orig, ui, repo, *pats, **opts):
205 def override_log(orig, ui, repo, *pats, **opts):
231 try:
206 try:
232 repo.lfstatus = True
207 repo.lfstatus = True
233 orig(ui, repo, *pats, **opts)
208 orig(ui, repo, *pats, **opts)
234 finally:
209 finally:
235 repo.lfstatus = False
210 repo.lfstatus = False
236
211
237 def override_verify(orig, ui, repo, *pats, **opts):
212 def override_verify(orig, ui, repo, *pats, **opts):
238 large = opts.pop('large', False)
213 large = opts.pop('large', False)
239 all = opts.pop('lfa', False)
214 all = opts.pop('lfa', False)
240 contents = opts.pop('lfc', False)
215 contents = opts.pop('lfc', False)
241
216
242 result = orig(ui, repo, *pats, **opts)
217 result = orig(ui, repo, *pats, **opts)
243 if large:
218 if large:
244 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
219 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
245 return result
220 return result
246
221
247 # Override needs to refresh standins so that update's normal merge
222 # Override needs to refresh standins so that update's normal merge
248 # will go through properly. Then the other update hook (overriding repo.update)
223 # will go through properly. Then the other update hook (overriding repo.update)
249 # will get the new files. Filemerge is also overridden so that the merge
224 # will get the new files. Filemerge is also overridden so that the merge
250 # will merge standins correctly.
225 # will merge standins correctly.
251 def override_update(orig, ui, repo, *pats, **opts):
226 def override_update(orig, ui, repo, *pats, **opts):
252 lfdirstate = lfutil.openlfdirstate(ui, repo)
227 lfdirstate = lfutil.openlfdirstate(ui, repo)
253 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
228 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
254 False, False)
229 False, False)
255 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
230 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
256
231
257 # Need to lock between the standins getting updated and their lfiles
232 # Need to lock between the standins getting updated and their lfiles
258 # getting updated
233 # getting updated
259 wlock = repo.wlock()
234 wlock = repo.wlock()
260 try:
235 try:
261 if opts['check']:
236 if opts['check']:
262 mod = len(modified) > 0
237 mod = len(modified) > 0
263 for lfile in unsure:
238 for lfile in unsure:
264 standin = lfutil.standin(lfile)
239 standin = lfutil.standin(lfile)
265 if repo['.'][standin].data().strip() != \
240 if repo['.'][standin].data().strip() != \
266 lfutil.hashfile(repo.wjoin(lfile)):
241 lfutil.hashfile(repo.wjoin(lfile)):
267 mod = True
242 mod = True
268 else:
243 else:
269 lfdirstate.normal(lfile)
244 lfdirstate.normal(lfile)
270 lfdirstate.write()
245 lfdirstate.write()
271 if mod:
246 if mod:
272 raise util.Abort(_('uncommitted local changes'))
247 raise util.Abort(_('uncommitted local changes'))
273 # XXX handle removed differently
248 # XXX handle removed differently
274 if not opts['clean']:
249 if not opts['clean']:
275 for lfile in unsure + modified + added:
250 for lfile in unsure + modified + added:
276 lfutil.updatestandin(repo, lfutil.standin(lfile))
251 lfutil.updatestandin(repo, lfutil.standin(lfile))
277 finally:
252 finally:
278 wlock.release()
253 wlock.release()
279 return orig(ui, repo, *pats, **opts)
254 return orig(ui, repo, *pats, **opts)
280
255
281 # Override filemerge to prompt the user about how they wish to merge lfiles.
256 # Override filemerge to prompt the user about how they wish to merge lfiles.
282 # This will handle identical edits, and copy/rename + edit without prompting
257 # This will handle identical edits, and copy/rename + edit without prompting
283 # the user.
258 # the user.
284 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
259 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
285 # Use better variable names here. Because this is a wrapper we cannot
260 # Use better variable names here. Because this is a wrapper we cannot
286 # change the variable names in the function declaration.
261 # change the variable names in the function declaration.
287 fcdest, fcother, fcancestor = fcd, fco, fca
262 fcdest, fcother, fcancestor = fcd, fco, fca
288 if not lfutil.isstandin(orig):
263 if not lfutil.isstandin(orig):
289 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
264 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
290 else:
265 else:
291 if not fcother.cmp(fcdest): # files identical?
266 if not fcother.cmp(fcdest): # files identical?
292 return None
267 return None
293
268
294 # backwards, use working dir parent as ancestor
269 # backwards, use working dir parent as ancestor
295 if fcancestor == fcother:
270 if fcancestor == fcother:
296 fcancestor = fcdest.parents()[0]
271 fcancestor = fcdest.parents()[0]
297
272
298 if orig != fcother.path():
273 if orig != fcother.path():
299 repo.ui.status(_('merging %s and %s to %s\n')
274 repo.ui.status(_('merging %s and %s to %s\n')
300 % (lfutil.splitstandin(orig),
275 % (lfutil.splitstandin(orig),
301 lfutil.splitstandin(fcother.path()),
276 lfutil.splitstandin(fcother.path()),
302 lfutil.splitstandin(fcdest.path())))
277 lfutil.splitstandin(fcdest.path())))
303 else:
278 else:
304 repo.ui.status(_('merging %s\n')
279 repo.ui.status(_('merging %s\n')
305 % lfutil.splitstandin(fcdest.path()))
280 % lfutil.splitstandin(fcdest.path()))
306
281
307 if fcancestor.path() != fcother.path() and fcother.data() == \
282 if fcancestor.path() != fcother.path() and fcother.data() == \
308 fcancestor.data():
283 fcancestor.data():
309 return 0
284 return 0
310 if fcancestor.path() != fcdest.path() and fcdest.data() == \
285 if fcancestor.path() != fcdest.path() and fcdest.data() == \
311 fcancestor.data():
286 fcancestor.data():
312 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
287 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
313 return 0
288 return 0
314
289
315 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
290 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
316 'keep (l)ocal or take (o)ther?') %
291 'keep (l)ocal or take (o)ther?') %
317 lfutil.splitstandin(orig),
292 lfutil.splitstandin(orig),
318 (_('&Local'), _('&Other')), 0) == 0:
293 (_('&Local'), _('&Other')), 0) == 0:
319 return 0
294 return 0
320 else:
295 else:
321 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
296 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
322 return 0
297 return 0
323
298
324 # Copy first changes the matchers to match standins instead of lfiles.
299 # Copy first changes the matchers to match standins instead of lfiles.
325 # Then it overrides util.copyfile; in that function it checks if the destination
300 # Then it overrides util.copyfile; in that function it checks if the destination
326 # lfile already exists. It also keeps a list of copied files so that the lfiles
301 # lfile already exists. It also keeps a list of copied files so that the lfiles
327 # can be copied and the dirstate updated.
302 # can be copied and the dirstate updated.
328 def override_copy(orig, ui, repo, pats, opts, rename=False):
303 def override_copy(orig, ui, repo, pats, opts, rename=False):
329 # doesn't remove lfile on rename
304 # doesn't remove lfile on rename
330 if len(pats) < 2:
305 if len(pats) < 2:
331 # this isn't legal, let the original function deal with it
306 # this isn't legal, let the original function deal with it
332 return orig(ui, repo, pats, opts, rename)
307 return orig(ui, repo, pats, opts, rename)
333
308
334 def makestandin(relpath):
309 def makestandin(relpath):
335 try:
336 # Mercurial >= 1.9
337 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
310 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
338 except ImportError:
339 # Mercurial <= 1.8
340 path = util.canonpath(repo.root, repo.getcwd(), relpath)
341 return os.path.join(os.path.relpath('.', repo.getcwd()),
311 return os.path.join(os.path.relpath('.', repo.getcwd()),
342 lfutil.standin(path))
312 lfutil.standin(path))
343
313
344 try:
345 # Mercurial >= 1.9
346 fullpats = scmutil.expandpats(pats)
314 fullpats = scmutil.expandpats(pats)
347 except ImportError:
348 # Mercurial <= 1.8
349 fullpats = cmdutil.expandpats(pats)
350 dest = fullpats[-1]
315 dest = fullpats[-1]
351
316
352 if os.path.isdir(dest):
317 if os.path.isdir(dest):
353 if not os.path.isdir(makestandin(dest)):
318 if not os.path.isdir(makestandin(dest)):
354 os.makedirs(makestandin(dest))
319 os.makedirs(makestandin(dest))
355 # This could copy both lfiles and normal files in one command, but we don't
320 # This could copy both lfiles and normal files in one command, but we don't
356 # want to do that. First replace the matcher to only match normal files
321 # want to do that. First replace the matcher to only match normal files
357 # and run it, then replace it to just match lfiles and run it again.
322 # and run it, then replace it to just match lfiles and run it again.
358 nonormalfiles = False
323 nonormalfiles = False
359 nolfiles = False
324 nolfiles = False
360 try:
325 try:
361 installnormalfilesmatchfn(repo[None].manifest())
326 installnormalfilesmatchfn(repo[None].manifest())
362 result = orig(ui, repo, pats, opts, rename)
327 result = orig(ui, repo, pats, opts, rename)
363 except util.Abort, e:
328 except util.Abort, e:
364 if str(e) != 'no files to copy':
329 if str(e) != 'no files to copy':
365 raise e
330 raise e
366 else:
331 else:
367 nonormalfiles = True
332 nonormalfiles = True
368 result = 0
333 result = 0
369 finally:
334 finally:
370 restorematchfn()
335 restorematchfn()
371
336
372 # The first rename can cause our current working directory to be removed.
337 # The first rename can cause our current working directory to be removed.
373 # In that case there is nothing left to copy/rename so just quit.
338 # In that case there is nothing left to copy/rename so just quit.
374 try:
339 try:
375 repo.getcwd()
340 repo.getcwd()
376 except OSError:
341 except OSError:
377 return result
342 return result
378
343
379 try:
344 try:
380 # When we call orig below it creates the standins but we don't add them
345 # When we call orig below it creates the standins but we don't add them
381 # to the dir state until later so lock during that time.
346 # to the dir state until later so lock during that time.
382 wlock = repo.wlock()
347 wlock = repo.wlock()
383
348
384 manifest = repo[None].manifest()
349 manifest = repo[None].manifest()
385 oldmatch = None # for the closure
350 oldmatch = None # for the closure
386 def override_match(repo, pats=[], opts={}, globbed=False,
351 def override_match(repo, pats=[], opts={}, globbed=False,
387 default='relpath'):
352 default='relpath'):
388 newpats = []
353 newpats = []
389 # The patterns were previously mangled to add the standin
354 # The patterns were previously mangled to add the standin
390 # directory; we need to remove that now
355 # directory; we need to remove that now
391 for pat in pats:
356 for pat in pats:
392 if match_.patkind(pat) is None and lfutil.shortname in pat:
357 if match_.patkind(pat) is None and lfutil.shortname in pat:
393 newpats.append(pat.replace(lfutil.shortname, ''))
358 newpats.append(pat.replace(lfutil.shortname, ''))
394 else:
359 else:
395 newpats.append(pat)
360 newpats.append(pat)
396 match = oldmatch(repo, newpats, opts, globbed, default)
361 match = oldmatch(repo, newpats, opts, globbed, default)
397 m = copy.copy(match)
362 m = copy.copy(match)
398 lfile = lambda f: lfutil.standin(f) in manifest
363 lfile = lambda f: lfutil.standin(f) in manifest
399 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
364 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
400 m._fmap = set(m._files)
365 m._fmap = set(m._files)
401 orig_matchfn = m.matchfn
366 orig_matchfn = m.matchfn
402 m.matchfn = lambda f: lfutil.isstandin(f) and \
367 m.matchfn = lambda f: lfutil.isstandin(f) and \
403 lfile(lfutil.splitstandin(f)) and \
368 lfile(lfutil.splitstandin(f)) and \
404 orig_matchfn(lfutil.splitstandin(f)) or None
369 orig_matchfn(lfutil.splitstandin(f)) or None
405 return m
370 return m
406 oldmatch = installmatchfn(override_match)
371 oldmatch = installmatchfn(override_match)
407 listpats = []
372 listpats = []
408 for pat in pats:
373 for pat in pats:
409 if match_.patkind(pat) is not None:
374 if match_.patkind(pat) is not None:
410 listpats.append(pat)
375 listpats.append(pat)
411 else:
376 else:
412 listpats.append(makestandin(pat))
377 listpats.append(makestandin(pat))
413
378
414 try:
379 try:
415 origcopyfile = util.copyfile
380 origcopyfile = util.copyfile
416 copiedfiles = []
381 copiedfiles = []
417 def override_copyfile(src, dest):
382 def override_copyfile(src, dest):
418 if lfutil.shortname in src and lfutil.shortname in dest:
383 if lfutil.shortname in src and lfutil.shortname in dest:
419 destlfile = dest.replace(lfutil.shortname, '')
384 destlfile = dest.replace(lfutil.shortname, '')
420 if not opts['force'] and os.path.exists(destlfile):
385 if not opts['force'] and os.path.exists(destlfile):
421 raise IOError('',
386 raise IOError('',
422 _('destination largefile already exists'))
387 _('destination largefile already exists'))
423 copiedfiles.append((src, dest))
388 copiedfiles.append((src, dest))
424 origcopyfile(src, dest)
389 origcopyfile(src, dest)
425
390
426 util.copyfile = override_copyfile
391 util.copyfile = override_copyfile
427 result += orig(ui, repo, listpats, opts, rename)
392 result += orig(ui, repo, listpats, opts, rename)
428 finally:
393 finally:
429 util.copyfile = origcopyfile
394 util.copyfile = origcopyfile
430
395
431 lfdirstate = lfutil.openlfdirstate(ui, repo)
396 lfdirstate = lfutil.openlfdirstate(ui, repo)
432 for (src, dest) in copiedfiles:
397 for (src, dest) in copiedfiles:
433 if lfutil.shortname in src and lfutil.shortname in dest:
398 if lfutil.shortname in src and lfutil.shortname in dest:
434 srclfile = src.replace(lfutil.shortname, '')
399 srclfile = src.replace(lfutil.shortname, '')
435 destlfile = dest.replace(lfutil.shortname, '')
400 destlfile = dest.replace(lfutil.shortname, '')
436 destlfiledir = os.path.dirname(destlfile) or '.'
401 destlfiledir = os.path.dirname(destlfile) or '.'
437 if not os.path.isdir(destlfiledir):
402 if not os.path.isdir(destlfiledir):
438 os.makedirs(destlfiledir)
403 os.makedirs(destlfiledir)
439 if rename:
404 if rename:
440 os.rename(srclfile, destlfile)
405 os.rename(srclfile, destlfile)
441 lfdirstate.remove(os.path.relpath(srclfile,
406 lfdirstate.remove(os.path.relpath(srclfile,
442 repo.root))
407 repo.root))
443 else:
408 else:
444 util.copyfile(srclfile, destlfile)
409 util.copyfile(srclfile, destlfile)
445 lfdirstate.add(os.path.relpath(destlfile,
410 lfdirstate.add(os.path.relpath(destlfile,
446 repo.root))
411 repo.root))
447 lfdirstate.write()
412 lfdirstate.write()
448 except util.Abort, e:
413 except util.Abort, e:
449 if str(e) != 'no files to copy':
414 if str(e) != 'no files to copy':
450 raise e
415 raise e
451 else:
416 else:
452 nolfiles = True
417 nolfiles = True
453 finally:
418 finally:
454 restorematchfn()
419 restorematchfn()
455 wlock.release()
420 wlock.release()
456
421
457 if nolfiles and nonormalfiles:
422 if nolfiles and nonormalfiles:
458 raise util.Abort(_('no files to copy'))
423 raise util.Abort(_('no files to copy'))
459
424
460 return result
425 return result
461
426
462 # When the user calls revert, we have to be careful to not revert any changes
427 # When the user calls revert, we have to be careful to not revert any changes
463 # to other lfiles accidentally. This means we have to keep track of the lfiles
428 # to other lfiles accidentally. This means we have to keep track of the lfiles
464 # that are being reverted so we only pull down the necessary lfiles.
429 # that are being reverted so we only pull down the necessary lfiles.
465 #
430 #
466 # Standins are only updated (to match the hash of lfiles) before commits.
431 # Standins are only updated (to match the hash of lfiles) before commits.
467 # Update the standins then run the original revert (changing the matcher to hit
432 # Update the standins then run the original revert (changing the matcher to hit
468 # standins instead of lfiles). Based on the resulting standins update the
433 # standins instead of lfiles). Based on the resulting standins update the
469 # lfiles. Then return the standins to their proper state.
434 # lfiles. Then return the standins to their proper state.
470 def override_revert(orig, ui, repo, *pats, **opts):
435 def override_revert(orig, ui, repo, *pats, **opts):
471 # Because we put the standins in a bad state (by updating them) and then
436 # Because we put the standins in a bad state (by updating them) and then
472 # return them to a correct state we need to lock to prevent others from
437 # return them to a correct state we need to lock to prevent others from
473 # changing them in their incorrect state.
438 # changing them in their incorrect state.
474 wlock = repo.wlock()
439 wlock = repo.wlock()
475 try:
440 try:
476 lfdirstate = lfutil.openlfdirstate(ui, repo)
441 lfdirstate = lfutil.openlfdirstate(ui, repo)
477 (modified, added, removed, missing, unknown, ignored, clean) = \
442 (modified, added, removed, missing, unknown, ignored, clean) = \
478 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
443 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
479 for lfile in modified:
444 for lfile in modified:
480 lfutil.updatestandin(repo, lfutil.standin(lfile))
445 lfutil.updatestandin(repo, lfutil.standin(lfile))
481
446
482 try:
447 try:
483 ctx = repo[opts.get('rev')]
448 ctx = repo[opts.get('rev')]
484 oldmatch = None # for the closure
449 oldmatch = None # for the closure
485 def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
450 def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
486 default='relpath'):
451 default='relpath'):
487 if util.safehasattr(ctxorrepo, 'match'):
452 if util.safehasattr(ctxorrepo, 'match'):
488 ctx0 = ctxorrepo
453 ctx0 = ctxorrepo
489 else:
454 else:
490 ctx0 = ctxorrepo[None]
455 ctx0 = ctxorrepo[None]
491 match = oldmatch(ctxorrepo, pats, opts, globbed, default)
456 match = oldmatch(ctxorrepo, pats, opts, globbed, default)
492 m = copy.copy(match)
457 m = copy.copy(match)
493 def tostandin(f):
458 def tostandin(f):
494 if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx:
459 if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx:
495 return lfutil.standin(f)
460 return lfutil.standin(f)
496 elif lfutil.standin(f) in repo[None]:
461 elif lfutil.standin(f) in repo[None]:
497 return None
462 return None
498 return f
463 return f
499 m._files = [tostandin(f) for f in m._files]
464 m._files = [tostandin(f) for f in m._files]
500 m._files = [f for f in m._files if f is not None]
465 m._files = [f for f in m._files if f is not None]
501 m._fmap = set(m._files)
466 m._fmap = set(m._files)
502 orig_matchfn = m.matchfn
467 orig_matchfn = m.matchfn
503 def matchfn(f):
468 def matchfn(f):
504 if lfutil.isstandin(f):
469 if lfutil.isstandin(f):
505 # We need to keep track of what lfiles are being
470 # We need to keep track of what lfiles are being
506 # matched so we know which ones to update later
471 # matched so we know which ones to update later
507 # (otherwise we revert changes to other lfiles
472 # (otherwise we revert changes to other lfiles
508 # accidentally). This is repo specific, so duckpunch
473 # accidentally). This is repo specific, so duckpunch
509 # the repo object to keep the list of lfiles for us
474 # the repo object to keep the list of lfiles for us
510 # later.
475 # later.
511 if orig_matchfn(lfutil.splitstandin(f)) and \
476 if orig_matchfn(lfutil.splitstandin(f)) and \
512 (f in repo[None] or f in ctx):
477 (f in repo[None] or f in ctx):
513 lfileslist = getattr(repo, '_lfilestoupdate', [])
478 lfileslist = getattr(repo, '_lfilestoupdate', [])
514 lfileslist.append(lfutil.splitstandin(f))
479 lfileslist.append(lfutil.splitstandin(f))
515 repo._lfilestoupdate = lfileslist
480 repo._lfilestoupdate = lfileslist
516 return True
481 return True
517 else:
482 else:
518 return False
483 return False
519 return orig_matchfn(f)
484 return orig_matchfn(f)
520 m.matchfn = matchfn
485 m.matchfn = matchfn
521 return m
486 return m
522 oldmatch = installmatchfn(override_match)
487 oldmatch = installmatchfn(override_match)
523 try:
524 # Mercurial >= 1.9
525 scmutil.match
488 scmutil.match
526 matches = override_match(repo[None], pats, opts)
489 matches = override_match(repo[None], pats, opts)
527 except ImportError:
528 # Mercurial <= 1.8
529 matches = override_match(repo, pats, opts)
530 orig(ui, repo, *pats, **opts)
490 orig(ui, repo, *pats, **opts)
531 finally:
491 finally:
532 restorematchfn()
492 restorematchfn()
533 lfileslist = getattr(repo, '_lfilestoupdate', [])
493 lfileslist = getattr(repo, '_lfilestoupdate', [])
534 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
494 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
535 printmessage=False)
495 printmessage=False)
536 # Empty out the lfiles list so we start fresh next time
496 # Empty out the lfiles list so we start fresh next time
537 repo._lfilestoupdate = []
497 repo._lfilestoupdate = []
538 for lfile in modified:
498 for lfile in modified:
539 if lfile in lfileslist:
499 if lfile in lfileslist:
540 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
500 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
541 in repo['.']:
501 in repo['.']:
542 lfutil.writestandin(repo, lfutil.standin(lfile),
502 lfutil.writestandin(repo, lfutil.standin(lfile),
543 repo['.'][lfile].data().strip(),
503 repo['.'][lfile].data().strip(),
544 'x' in repo['.'][lfile].flags())
504 'x' in repo['.'][lfile].flags())
545 lfdirstate = lfutil.openlfdirstate(ui, repo)
505 lfdirstate = lfutil.openlfdirstate(ui, repo)
546 for lfile in added:
506 for lfile in added:
547 standin = lfutil.standin(lfile)
507 standin = lfutil.standin(lfile)
548 if standin not in ctx and (standin in matches or opts.get('all')):
508 if standin not in ctx and (standin in matches or opts.get('all')):
549 if lfile in lfdirstate:
509 if lfile in lfdirstate:
550 try:
551 # Mercurial >= 1.9
552 lfdirstate.drop(lfile)
510 lfdirstate.drop(lfile)
553 except AttributeError:
554 # Mercurial <= 1.8
555 lfdirstate.forget(lfile)
556 util.unlinkpath(repo.wjoin(standin))
511 util.unlinkpath(repo.wjoin(standin))
557 lfdirstate.write()
512 lfdirstate.write()
558 finally:
513 finally:
559 wlock.release()
514 wlock.release()
560
515
561 def hg_update(orig, repo, node):
516 def hg_update(orig, repo, node):
562 result = orig(repo, node)
517 result = orig(repo, node)
563 # XXX check if it worked first
518 # XXX check if it worked first
564 lfcommands.updatelfiles(repo.ui, repo)
519 lfcommands.updatelfiles(repo.ui, repo)
565 return result
520 return result
566
521
567 def hg_clean(orig, repo, node, show_stats=True):
522 def hg_clean(orig, repo, node, show_stats=True):
568 result = orig(repo, node, show_stats)
523 result = orig(repo, node, show_stats)
569 lfcommands.updatelfiles(repo.ui, repo)
524 lfcommands.updatelfiles(repo.ui, repo)
570 return result
525 return result
571
526
572 def hg_merge(orig, repo, node, force=None, remind=True):
527 def hg_merge(orig, repo, node, force=None, remind=True):
573 result = orig(repo, node, force, remind)
528 result = orig(repo, node, force, remind)
574 lfcommands.updatelfiles(repo.ui, repo)
529 lfcommands.updatelfiles(repo.ui, repo)
575 return result
530 return result
576
531
577 # When we rebase a repository with remotely changed lfiles, we need
532 # When we rebase a repository with remotely changed lfiles, we need
578 # to take some extra care so that the lfiles are correctly updated
533 # to take some extra care so that the lfiles are correctly updated
579 # in the working copy
534 # in the working copy
580 def override_pull(orig, ui, repo, source=None, **opts):
535 def override_pull(orig, ui, repo, source=None, **opts):
581 if opts.get('rebase', False):
536 if opts.get('rebase', False):
582 repo._isrebasing = True
537 repo._isrebasing = True
583 try:
538 try:
584 if opts.get('update'):
539 if opts.get('update'):
585 del opts['update']
540 del opts['update']
586 ui.debug('--update and --rebase are not compatible, ignoring '
541 ui.debug('--update and --rebase are not compatible, ignoring '
587 'the update flag\n')
542 'the update flag\n')
588 del opts['rebase']
543 del opts['rebase']
589 try:
590 # Mercurial >= 1.9
591 cmdutil.bailifchanged(repo)
544 cmdutil.bailifchanged(repo)
592 except AttributeError:
593 # Mercurial <= 1.8
594 cmdutil.bail_if_changed(repo)
595 revsprepull = len(repo)
545 revsprepull = len(repo)
596 origpostincoming = commands.postincoming
546 origpostincoming = commands.postincoming
597 def _dummy(*args, **kwargs):
547 def _dummy(*args, **kwargs):
598 pass
548 pass
599 commands.postincoming = _dummy
549 commands.postincoming = _dummy
600 repo.lfpullsource = source
550 repo.lfpullsource = source
601 if not source:
551 if not source:
602 source = 'default'
552 source = 'default'
603 try:
553 try:
604 result = commands.pull(ui, repo, source, **opts)
554 result = commands.pull(ui, repo, source, **opts)
605 finally:
555 finally:
606 commands.postincoming = origpostincoming
556 commands.postincoming = origpostincoming
607 revspostpull = len(repo)
557 revspostpull = len(repo)
608 if revspostpull > revsprepull:
558 if revspostpull > revsprepull:
609 result = result or rebase.rebase(ui, repo)
559 result = result or rebase.rebase(ui, repo)
610 finally:
560 finally:
611 repo._isrebasing = False
561 repo._isrebasing = False
612 else:
562 else:
613 repo.lfpullsource = source
563 repo.lfpullsource = source
614 if not source:
564 if not source:
615 source = 'default'
565 source = 'default'
616 result = orig(ui, repo, source, **opts)
566 result = orig(ui, repo, source, **opts)
617 return result
567 return result
618
568
619 def override_rebase(orig, ui, repo, **opts):
569 def override_rebase(orig, ui, repo, **opts):
620 repo._isrebasing = True
570 repo._isrebasing = True
621 try:
571 try:
622 orig(ui, repo, **opts)
572 orig(ui, repo, **opts)
623 finally:
573 finally:
624 repo._isrebasing = False
574 repo._isrebasing = False
625
575
626 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
576 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
627 prefix=None, mtime=None, subrepos=None):
577 prefix=None, mtime=None, subrepos=None):
628 # No need to lock because we are only reading history and lfile caches,
578 # No need to lock because we are only reading history and lfile caches,
629 # neither of which is modified.
579 # neither of which is modified.
630
580
631 lfcommands.cachelfiles(repo.ui, repo, node)
581 lfcommands.cachelfiles(repo.ui, repo, node)
632
582
633 if kind not in archival.archivers:
583 if kind not in archival.archivers:
634 raise util.Abort(_("unknown archive type '%s'") % kind)
584 raise util.Abort(_("unknown archive type '%s'") % kind)
635
585
636 ctx = repo[node]
586 ctx = repo[node]
637
587
638 # In Mercurial <= 1.5 the prefix is passed to the archiver so try that
639 # if that doesn't work we are probably in Mercurial >= 1.6 where the
640 # prefix is not handled by the archiver
641 try:
642 archiver = archival.archivers[kind](dest, prefix, mtime or \
643 ctx.date()[0])
644
645 def write(name, mode, islink, getdata):
646 if matchfn and not matchfn(name):
647 return
648 data = getdata()
649 if decode:
650 data = repo.wwritedata(name, data)
651 archiver.addfile(name, mode, islink, data)
652 except TypeError:
653 if kind == 'files':
588 if kind == 'files':
654 if prefix:
589 if prefix:
655 raise util.Abort(
590 raise util.Abort(
656 _('cannot give prefix when archiving to files'))
591 _('cannot give prefix when archiving to files'))
657 else:
592 else:
658 prefix = archival.tidyprefix(dest, kind, prefix)
593 prefix = archival.tidyprefix(dest, kind, prefix)
659
594
660 def write(name, mode, islink, getdata):
595 def write(name, mode, islink, getdata):
661 if matchfn and not matchfn(name):
596 if matchfn and not matchfn(name):
662 return
597 return
663 data = getdata()
598 data = getdata()
664 if decode:
599 if decode:
665 data = repo.wwritedata(name, data)
600 data = repo.wwritedata(name, data)
666 archiver.addfile(prefix + name, mode, islink, data)
601 archiver.addfile(prefix + name, mode, islink, data)
667
602
668 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
603 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
669
604
670 if repo.ui.configbool("ui", "archivemeta", True):
605 if repo.ui.configbool("ui", "archivemeta", True):
671 def metadata():
606 def metadata():
672 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
607 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
673 hex(repo.changelog.node(0)), hex(node), ctx.branch())
608 hex(repo.changelog.node(0)), hex(node), ctx.branch())
674
609
675 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
610 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
676 if repo.tagtype(t) == 'global')
611 if repo.tagtype(t) == 'global')
677 if not tags:
612 if not tags:
678 repo.ui.pushbuffer()
613 repo.ui.pushbuffer()
679 opts = {'template': '{latesttag}\n{latesttagdistance}',
614 opts = {'template': '{latesttag}\n{latesttagdistance}',
680 'style': '', 'patch': None, 'git': None}
615 'style': '', 'patch': None, 'git': None}
681 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
616 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
682 ltags, dist = repo.ui.popbuffer().split('\n')
617 ltags, dist = repo.ui.popbuffer().split('\n')
683 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
618 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
684 tags += 'latesttagdistance: %s\n' % dist
619 tags += 'latesttagdistance: %s\n' % dist
685
620
686 return base + tags
621 return base + tags
687
622
688 write('.hg_archival.txt', 0644, False, metadata)
623 write('.hg_archival.txt', 0644, False, metadata)
689
624
690 for f in ctx:
625 for f in ctx:
691 ff = ctx.flags(f)
626 ff = ctx.flags(f)
692 getdata = ctx[f].data
627 getdata = ctx[f].data
693 if lfutil.isstandin(f):
628 if lfutil.isstandin(f):
694 path = lfutil.findfile(repo, getdata().strip())
629 path = lfutil.findfile(repo, getdata().strip())
695 f = lfutil.splitstandin(f)
630 f = lfutil.splitstandin(f)
696
631
697 def getdatafn():
632 def getdatafn():
698 try:
633 try:
699 fd = open(path, 'rb')
634 fd = open(path, 'rb')
700 return fd.read()
635 return fd.read()
701 finally:
636 finally:
702 fd.close()
637 fd.close()
703
638
704 getdata = getdatafn
639 getdata = getdatafn
705 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
640 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
706
641
707 if subrepos:
642 if subrepos:
708 for subpath in ctx.substate:
643 for subpath in ctx.substate:
709 sub = ctx.sub(subpath)
644 sub = ctx.sub(subpath)
710 try:
645 try:
711 sub.archive(repo.ui, archiver, prefix)
646 sub.archive(repo.ui, archiver, prefix)
712 except TypeError:
647 except TypeError:
713 sub.archive(archiver, prefix)
648 sub.archive(archiver, prefix)
714
649
715 archiver.done()
650 archiver.done()
716
651
716 # If an lfile is modified, the change is not reflected in its standin until a
652 # If an lfile is modified, the change is not reflected in its standin until a
718 # commit. cmdutil.bailifchanged raises an exception if the repo has
653 # commit. cmdutil.bailifchanged raises an exception if the repo has
719 # uncommitted changes. Wrap it to also check if lfiles were changed. This is
654 # uncommitted changes. Wrap it to also check if lfiles were changed. This is
720 # used by bisect and backout.
655 # used by bisect and backout.
721 def override_bailifchanged(orig, repo):
656 def override_bailifchanged(orig, repo):
722 orig(repo)
657 orig(repo)
723 repo.lfstatus = True
658 repo.lfstatus = True
724 modified, added, removed, deleted = repo.status()[:4]
659 modified, added, removed, deleted = repo.status()[:4]
725 repo.lfstatus = False
660 repo.lfstatus = False
726 if modified or added or removed or deleted:
661 if modified or added or removed or deleted:
727 raise util.Abort(_('outstanding uncommitted changes'))
662 raise util.Abort(_('outstanding uncommitted changes'))
728
663
729 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
664 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
730 def override_fetch(orig, ui, repo, *pats, **opts):
665 def override_fetch(orig, ui, repo, *pats, **opts):
731 repo.lfstatus = True
666 repo.lfstatus = True
732 modified, added, removed, deleted = repo.status()[:4]
667 modified, added, removed, deleted = repo.status()[:4]
733 repo.lfstatus = False
668 repo.lfstatus = False
734 if modified or added or removed or deleted:
669 if modified or added or removed or deleted:
735 raise util.Abort(_('outstanding uncommitted changes'))
670 raise util.Abort(_('outstanding uncommitted changes'))
736 return orig(ui, repo, *pats, **opts)
671 return orig(ui, repo, *pats, **opts)
737
672
738 def override_forget(orig, ui, repo, *pats, **opts):
673 def override_forget(orig, ui, repo, *pats, **opts):
739 installnormalfilesmatchfn(repo[None].manifest())
674 installnormalfilesmatchfn(repo[None].manifest())
740 orig(ui, repo, *pats, **opts)
675 orig(ui, repo, *pats, **opts)
741 restorematchfn()
676 restorematchfn()
742 try:
743 # Mercurial >= 1.9
744 m = scmutil.match(repo[None], pats, opts)
677 m = scmutil.match(repo[None], pats, opts)
745 except ImportError:
746 # Mercurial <= 1.8
747 m = cmdutil.match(repo, pats, opts)
748
678
749 try:
679 try:
750 repo.lfstatus = True
680 repo.lfstatus = True
751 s = repo.status(match=m, clean=True)
681 s = repo.status(match=m, clean=True)
752 finally:
682 finally:
753 repo.lfstatus = False
683 repo.lfstatus = False
754 forget = sorted(s[0] + s[1] + s[3] + s[6])
684 forget = sorted(s[0] + s[1] + s[3] + s[6])
755 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
685 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
756
686
757 for f in forget:
687 for f in forget:
758 if lfutil.standin(f) not in repo.dirstate and not \
688 if lfutil.standin(f) not in repo.dirstate and not \
759 os.path.isdir(m.rel(lfutil.standin(f))):
689 os.path.isdir(m.rel(lfutil.standin(f))):
760 ui.warn(_('not removing %s: file is already untracked\n')
690 ui.warn(_('not removing %s: file is already untracked\n')
761 % m.rel(f))
691 % m.rel(f))
762
692
763 for f in forget:
693 for f in forget:
764 if ui.verbose or not m.exact(f):
694 if ui.verbose or not m.exact(f):
765 ui.status(_('removing %s\n') % m.rel(f))
695 ui.status(_('removing %s\n') % m.rel(f))
766
696
767 # Need to lock because standin files are deleted then removed from the
697 # Need to lock because standin files are deleted then removed from the
768 # repository and we could race in between.
698 # repository and we could race in between.
769 wlock = repo.wlock()
699 wlock = repo.wlock()
770 try:
700 try:
771 lfdirstate = lfutil.openlfdirstate(ui, repo)
701 lfdirstate = lfutil.openlfdirstate(ui, repo)
772 for f in forget:
702 for f in forget:
773 if lfdirstate[f] == 'a':
703 if lfdirstate[f] == 'a':
774 lfdirstate.drop(f)
704 lfdirstate.drop(f)
775 else:
705 else:
776 lfdirstate.remove(f)
706 lfdirstate.remove(f)
777 lfdirstate.write()
707 lfdirstate.write()
778 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
708 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
779 unlink=True)
709 unlink=True)
780 finally:
710 finally:
781 wlock.release()
711 wlock.release()
782
712
783 def getoutgoinglfiles(ui, repo, dest=None, **opts):
713 def getoutgoinglfiles(ui, repo, dest=None, **opts):
784 dest = ui.expandpath(dest or 'default-push', dest or 'default')
714 dest = ui.expandpath(dest or 'default-push', dest or 'default')
785 dest, branches = hg.parseurl(dest, opts.get('branch'))
715 dest, branches = hg.parseurl(dest, opts.get('branch'))
786 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
716 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
787 if revs:
717 if revs:
788 revs = [repo.lookup(rev) for rev in revs]
718 revs = [repo.lookup(rev) for rev in revs]
789
719
790 # Mercurial <= 1.5 had remoteui in cmdutil, then it moved to hg
791 try:
792 remoteui = cmdutil.remoteui
793 except AttributeError:
794 remoteui = hg.remoteui
720 remoteui = hg.remoteui
795
721
796 try:
722 try:
797 remote = hg.repository(remoteui(repo, opts), dest)
723 remote = hg.repository(remoteui(repo, opts), dest)
798 except error.RepoError:
724 except error.RepoError:
799 return None
725 return None
800 o = lfutil.findoutgoing(repo, remote, False)
726 o = lfutil.findoutgoing(repo, remote, False)
801 if not o:
727 if not o:
802 return None
728 return None
803 o = repo.changelog.nodesbetween(o, revs)[0]
729 o = repo.changelog.nodesbetween(o, revs)[0]
804 if opts.get('newest_first'):
730 if opts.get('newest_first'):
805 o.reverse()
731 o.reverse()
806
732
807 toupload = set()
733 toupload = set()
808 for n in o:
734 for n in o:
809 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
735 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
810 ctx = repo[n]
736 ctx = repo[n]
811 files = set(ctx.files())
737 files = set(ctx.files())
812 if len(parents) == 2:
738 if len(parents) == 2:
813 mc = ctx.manifest()
739 mc = ctx.manifest()
814 mp1 = ctx.parents()[0].manifest()
740 mp1 = ctx.parents()[0].manifest()
815 mp2 = ctx.parents()[1].manifest()
741 mp2 = ctx.parents()[1].manifest()
816 for f in mp1:
742 for f in mp1:
817 if f not in mc:
743 if f not in mc:
818 files.add(f)
744 files.add(f)
819 for f in mp2:
745 for f in mp2:
820 if f not in mc:
746 if f not in mc:
821 files.add(f)
747 files.add(f)
822 for f in mc:
748 for f in mc:
823 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
749 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
824 files.add(f)
750 files.add(f)
825 toupload = toupload.union(set([f for f in files if lfutil.isstandin(f)\
751 toupload = toupload.union(set([f for f in files if lfutil.isstandin(f)\
826 and f in ctx]))
752 and f in ctx]))
827 return toupload
753 return toupload
828
754
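For merge changesets (two parents), ctx.files() does not capture everything that may need uploading, so getoutgoinglfiles widens the set by comparing the changeset's manifest against both parent manifests. Treating manifests as plain dicts from filename to node, the comparison amounts to the following (a simplified illustration, not the extension's code):

def changed_in_merge(mc, mp1, mp2):
    '''Files worth checking after a merge: anything a parent has that the
    child dropped, or whose node differs from either parent's version.'''
    files = set()
    for f in mp1:
        if f not in mc:
            files.add(f)
    for f in mp2:
        if f not in mc:
            files.add(f)
    for f in mc:
        if mc[f] != mp1.get(f) or mc[f] != mp2.get(f):
            files.add(f)
    return files

mp1 = {'a': 'n1', 'b': 'n2'}
mp2 = {'a': 'n1', 'c': 'n3'}
mc = {'a': 'n1', 'b': 'n2', 'c': 'n3'}
# 'b' and 'c' are each missing from one parent, 'a' matches both.
assert changed_in_merge(mc, mp1, mp2) == set(['b', 'c'])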
829 def override_outgoing(orig, ui, repo, dest=None, **opts):
755 def override_outgoing(orig, ui, repo, dest=None, **opts):
830 orig(ui, repo, dest, **opts)
756 orig(ui, repo, dest, **opts)
831
757
832 if opts.pop('large', None):
758 if opts.pop('large', None):
833 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
759 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
834 if toupload is None:
760 if toupload is None:
835 ui.status(_('largefiles: No remote repo\n'))
761 ui.status(_('largefiles: No remote repo\n'))
836 else:
762 else:
837 ui.status(_('largefiles to upload:\n'))
763 ui.status(_('largefiles to upload:\n'))
838 for file in toupload:
764 for file in toupload:
839 ui.status(lfutil.splitstandin(file) + '\n')
765 ui.status(lfutil.splitstandin(file) + '\n')
840 ui.status('\n')
766 ui.status('\n')
841
767
842 def override_summary(orig, ui, repo, *pats, **opts):
768 def override_summary(orig, ui, repo, *pats, **opts):
843 orig(ui, repo, *pats, **opts)
769 orig(ui, repo, *pats, **opts)
844
770
845 if opts.pop('large', None):
771 if opts.pop('large', None):
846 toupload = getoutgoinglfiles(ui, repo, None, **opts)
772 toupload = getoutgoinglfiles(ui, repo, None, **opts)
847 if toupload is None:
773 if toupload is None:
848 ui.status(_('largefiles: No remote repo\n'))
774 ui.status(_('largefiles: No remote repo\n'))
849 else:
775 else:
850 ui.status(_('largefiles: %d to upload\n') % len(toupload))
776 ui.status(_('largefiles: %d to upload\n') % len(toupload))
851
777
852 def override_addremove(orig, ui, repo, *pats, **opts):
778 def override_addremove(orig, ui, repo, *pats, **opts):
853 # Check whether the parent or the child has largefiles; if either does,
779 # Check whether the parent or the child has largefiles; if either does,
854 # don't allow addremove. If there is a symlink in the manifest, getting
780 # don't allow addremove. If there is a symlink in the manifest, getting
855 # the manifest throws an exception; catch it and let addremove deal with
781 # the manifest throws an exception; catch it and let addremove deal with
856 # it. This happens in Mercurial's test test-addremove-symlink.
782 # it. This happens in Mercurial's test test-addremove-symlink.
857 try:
783 try:
858 manifesttip = set(repo['tip'].manifest())
784 manifesttip = set(repo['tip'].manifest())
859 except util.Abort:
785 except util.Abort:
860 manifesttip = set()
786 manifesttip = set()
861 try:
787 try:
862 manifestworking = set(repo[None].manifest())
788 manifestworking = set(repo[None].manifest())
863 except util.Abort:
789 except util.Abort:
864 manifestworking = set()
790 manifestworking = set()
865
791
866 # Manifests are only iterable, so turn them into sets, then take the union
792 # Manifests are only iterable, so turn them into sets, then take the union
867 for file in manifesttip.union(manifestworking):
793 for file in manifesttip.union(manifestworking):
868 if file.startswith(lfutil.shortname):
794 if file.startswith(lfutil.shortname):
869 raise util.Abort(
795 raise util.Abort(
870 _('addremove cannot be run on a repo with largefiles'))
796 _('addremove cannot be run on a repo with largefiles'))
871
797
872 return orig(ui, repo, *pats, **opts)
798 return orig(ui, repo, *pats, **opts)
873
799
874 # Calling purge with --all will cause the lfiles to be deleted.
800 # Calling purge with --all will cause the lfiles to be deleted.
875 # Override repo.status to prevent this from happening.
801 # Override repo.status to prevent this from happening.
876 def override_purge(orig, ui, repo, *dirs, **opts):
802 def override_purge(orig, ui, repo, *dirs, **opts):
877 oldstatus = repo.status
803 oldstatus = repo.status
878 def override_status(node1='.', node2=None, match=None, ignored=False,
804 def override_status(node1='.', node2=None, match=None, ignored=False,
879 clean=False, unknown=False, listsubrepos=False):
805 clean=False, unknown=False, listsubrepos=False):
880 r = oldstatus(node1, node2, match, ignored, clean, unknown,
806 r = oldstatus(node1, node2, match, ignored, clean, unknown,
881 listsubrepos)
807 listsubrepos)
882 lfdirstate = lfutil.openlfdirstate(ui, repo)
808 lfdirstate = lfutil.openlfdirstate(ui, repo)
883 modified, added, removed, deleted, unknown, ignored, clean = r
809 modified, added, removed, deleted, unknown, ignored, clean = r
884 unknown = [f for f in unknown if lfdirstate[f] == '?']
810 unknown = [f for f in unknown if lfdirstate[f] == '?']
885 ignored = [f for f in ignored if lfdirstate[f] == '?']
811 ignored = [f for f in ignored if lfdirstate[f] == '?']
886 return modified, added, removed, deleted, unknown, ignored, clean
812 return modified, added, removed, deleted, unknown, ignored, clean
887 repo.status = override_status
813 repo.status = override_status
888 orig(ui, repo, *dirs, **opts)
814 orig(ui, repo, *dirs, **opts)
889 repo.status = oldstatus
815 repo.status = oldstatus
890
816
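override_purge() above temporarily swaps repo.status for a filtered version and puts the original back once the wrapped purge returns. A generic sketch of that temporary monkey-patch pattern follows; the contextlib-based, exception-safe restore is a suggestion for illustration, not what the extension itself does.

    import contextlib

    @contextlib.contextmanager
    def patched_attr(obj, name, replacement):
        # swap obj.name for the replacement, always restoring the original
        saved = getattr(obj, name)
        setattr(obj, name, replacement)
        try:
            yield
        finally:
            setattr(obj, name, saved)

    # usage sketch (repo, override_status, orig as in the code above):
    # with patched_attr(repo, 'status', override_status):
    #     orig(ui, repo, *dirs, **opts)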
891 def override_rollback(orig, ui, repo, **opts):
817 def override_rollback(orig, ui, repo, **opts):
892 result = orig(ui, repo, **opts)
818 result = orig(ui, repo, **opts)
893 merge.update(repo, node=None, branchmerge=False, force=True,
819 merge.update(repo, node=None, branchmerge=False, force=True,
894 partial=lfutil.isstandin)
820 partial=lfutil.isstandin)
895 lfdirstate = lfutil.openlfdirstate(ui, repo)
821 lfdirstate = lfutil.openlfdirstate(ui, repo)
896 lfiles = lfutil.listlfiles(repo)
822 lfiles = lfutil.listlfiles(repo)
897 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
823 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
898 for file in lfiles:
824 for file in lfiles:
899 if file in oldlfiles:
825 if file in oldlfiles:
900 lfdirstate.normallookup(file)
826 lfdirstate.normallookup(file)
901 else:
827 else:
902 lfdirstate.add(file)
828 lfdirstate.add(file)
903 lfdirstate.write()
829 lfdirstate.write()
904 return result
830 return result
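override_rollback() resynchronizes the largefile dirstate with the restored parent: largefiles that already existed there get a normallookup, newly appearing ones are re-added. A toy sketch of that classification, with a plain dict standing in for lfdirstate (names are illustrative only):

    def resync_states(current_lfiles, old_lfiles):
        # map each largefile to the dirstate action rollback would apply
        old = set(old_lfiles)
        return dict((f, 'normallookup' if f in old else 'add')
                    for f in current_lfiles)

    assert resync_states(['a.bin', 'b.bin'], ['a.bin']) == {
        'a.bin': 'normallookup', 'b.bin': 'add'}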
@@ -1,162 +1,158 @@
1 # Copyright 2011 Fog Creek Software
1 # Copyright 2011 Fog Creek Software
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 import os
6 import os
7 import tempfile
7 import tempfile
8 import urllib2
8 import urllib2
9
9
10 from mercurial import error, httprepo, util, wireproto
10 from mercurial import error, httprepo, util, wireproto
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12
12
13 import lfutil
13 import lfutil
14
14
15 LARGEFILES_REQUIRED_MSG = '\nThis repository uses the largefiles extension.' \
15 LARGEFILES_REQUIRED_MSG = '\nThis repository uses the largefiles extension.' \
16 '\n\nPlease enable it in your Mercurial config ' \
16 '\n\nPlease enable it in your Mercurial config ' \
17 'file.\n'
17 'file.\n'
18
18
19 def putlfile(repo, proto, sha):
19 def putlfile(repo, proto, sha):
20 """putlfile puts a largefile into a repository's local cache and into the
20 """putlfile puts a largefile into a repository's local cache and into the
21 system cache."""
21 system cache."""
22 f = None
22 f = None
23 proto.redirect()
23 proto.redirect()
24 try:
24 try:
25 try:
25 try:
26 f = tempfile.NamedTemporaryFile(mode='wb+', prefix='hg-putlfile-')
26 f = tempfile.NamedTemporaryFile(mode='wb+', prefix='hg-putlfile-')
27 proto.getfile(f)
27 proto.getfile(f)
28 f.seek(0)
28 f.seek(0)
29 if sha != lfutil.hexsha1(f):
29 if sha != lfutil.hexsha1(f):
30 return wireproto.pushres(1)
30 return wireproto.pushres(1)
31 lfutil.copytocacheabsolute(repo, f.name, sha)
31 lfutil.copytocacheabsolute(repo, f.name, sha)
32 except IOError:
32 except IOError:
33 repo.ui.warn(
33 repo.ui.warn(
34 _('error: could not put received data into largefile store'))
34 _('error: could not put received data into largefile store'))
35 return wireproto.pushres(1)
35 return wireproto.pushres(1)
36 finally:
36 finally:
37 if f:
37 if f:
38 f.close()
38 f.close()
39
39
40 return wireproto.pushres(0)
40 return wireproto.pushres(0)
41
41
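putlfile() above spools the upload into a temporary file, recomputes its SHA-1 and only then copies it into the store, answering pushres(1) on any mismatch. A standard-library-only sketch of that verify-before-store step; hexsha1() stands in for lfutil.hexsha1, and store_if_valid is a made-up name, not part of the wire protocol.

    import hashlib
    import shutil
    import tempfile

    def hexsha1(fileobj):
        # hash a file object in chunks, like lfutil.hexsha1 is used above
        h = hashlib.sha1()
        for chunk in iter(lambda: fileobj.read(128 * 1024), b''):
            h.update(chunk)
        return h.hexdigest()

    def store_if_valid(src_fileobj, expected_sha, store_path):
        # spool, verify, then copy into place; 0 = accepted, 1 = rejected
        with tempfile.NamedTemporaryFile(mode='wb+',
                                         prefix='hg-putlfile-') as f:
            shutil.copyfileobj(src_fileobj, f)
            f.seek(0)
            if hexsha1(f) != expected_sha:
                return 1
            f.seek(0)
            with open(store_path, 'wb') as out:
                shutil.copyfileobj(f, out)
        return 0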
42 def getlfile(repo, proto, sha):
42 def getlfile(repo, proto, sha):
43 """getlfile retrieves a largefile from the repository-local cache or system
43 """getlfile retrieves a largefile from the repository-local cache or system
44 cache."""
44 cache."""
45 filename = lfutil.findfile(repo, sha)
45 filename = lfutil.findfile(repo, sha)
46 if not filename:
46 if not filename:
47 raise util.Abort(_('requested largefile %s not present in cache') % sha)
47 raise util.Abort(_('requested largefile %s not present in cache') % sha)
48 f = open(filename, 'rb')
48 f = open(filename, 'rb')
49 length = os.fstat(f.fileno())[6]
49 length = os.fstat(f.fileno())[6]
50 # since we can't set an HTTP content-length header here, and mercurial core
50 # since we can't set an HTTP content-length header here, and mercurial core
51 # provides no way to give the length of a streamres (and reading the entire
51 # provides no way to give the length of a streamres (and reading the entire
52 # file into RAM would be ill-advised), we just send the length on the first
52 # file into RAM would be ill-advised), we just send the length on the first
53 # line of the response, like the ssh proto does for string responses.
53 # line of the response, like the ssh proto does for string responses.
54 def generator():
54 def generator():
55 yield '%d\n' % length
55 yield '%d\n' % length
56 for chunk in f:
56 for chunk in f:
57 yield chunk
57 yield chunk
58 return wireproto.streamres(generator())
58 return wireproto.streamres(generator())
59
59
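Because the length of a streamres cannot be declared up front, getlfile() sends the byte count as the first line of the reply and the raw file after it. A standalone sketch of consuming such a length-prefixed stream; this is an illustration only, not the client code the extension installs (that is the getlfile() method further down).

    import io

    def read_lengthprefixed(stream, chunksize=128 * 1024):
        # first line: decimal payload length; the rest: the payload itself
        remaining = int(stream.readline())
        while remaining > 0:
            chunk = stream.read(min(chunksize, remaining))
            if not chunk:
                raise IOError('stream ended %d bytes early' % remaining)
            remaining -= len(chunk)
            yield chunk

    data = b'hello largefile'
    reply = io.BytesIO(b'15\n' + data)          # 15 == len(data)
    assert b''.join(read_lengthprefixed(reply)) == data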
60 def statlfile(repo, proto, sha):
60 def statlfile(repo, proto, sha):
61 """statlfile sends '2\n' if the largefile is missing, '1\n' if it has a
61 """statlfile sends '2\n' if the largefile is missing, '1\n' if it has a
62 mismatched checksum, or '0\n' if it is in good condition"""
62 mismatched checksum, or '0\n' if it is in good condition"""
63 filename = lfutil.findfile(repo, sha)
63 filename = lfutil.findfile(repo, sha)
64 if not filename:
64 if not filename:
65 return '2\n'
65 return '2\n'
66 fd = None
66 fd = None
67 try:
67 try:
68 fd = open(filename, 'rb')
68 fd = open(filename, 'rb')
69 return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
69 return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
70 finally:
70 finally:
71 if fd:
71 if fd:
72 fd.close()
72 fd.close()
73
73
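For reference, a toy interpretation of statlfile()'s reply as a caller might apply it; the constant names are invented, and anything unparseable is treated as missing, matching the client-side fallback further down.

    STAT_OK, STAT_CORRUPT, STAT_MISSING = 0, 1, 2

    def interpret_statlfile(reply):
        # reply is '0', '1' or '2' followed by a newline
        try:
            return int(reply)
        except ValueError:
            return STAT_MISSING

    assert interpret_statlfile('0\n') == STAT_OK
    assert interpret_statlfile('garbage') == STAT_MISSING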
74 def wirereposetup(ui, repo):
74 def wirereposetup(ui, repo):
75 class lfileswirerepository(repo.__class__):
75 class lfileswirerepository(repo.__class__):
76 def putlfile(self, sha, fd):
76 def putlfile(self, sha, fd):
77 # unfortunately, httprepository._callpush tries to convert its
77 # unfortunately, httprepository._callpush tries to convert its
78 # input file-like into a bundle before sending it, so we can't use
78 # input file-like into a bundle before sending it, so we can't use
79 # it ...
79 # it ...
80 if issubclass(self.__class__, httprepo.httprepository):
80 if issubclass(self.__class__, httprepo.httprepository):
81 try:
81 try:
82 return int(self._call('putlfile', data=fd, sha=sha,
82 return int(self._call('putlfile', data=fd, sha=sha,
83 headers={'content-type':'application/mercurial-0.1'}))
83 headers={'content-type':'application/mercurial-0.1'}))
84 except (ValueError, urllib2.HTTPError):
84 except (ValueError, urllib2.HTTPError):
85 return 1
85 return 1
86 # ... but we can't use sshrepository._call because the data=
86 # ... but we can't use sshrepository._call because the data=
87 # argument won't get sent, and _callpush does exactly what we want
87 # argument won't get sent, and _callpush does exactly what we want
88 # in this case: send the data straight through
88 # in this case: send the data straight through
89 else:
89 else:
90 try:
90 try:
91 ret, output = self._callpush("putlfile", fd, sha=sha)
91 ret, output = self._callpush("putlfile", fd, sha=sha)
92 if ret == "":
92 if ret == "":
93 raise error.ResponseError(_('putlfile failed:'),
93 raise error.ResponseError(_('putlfile failed:'),
94 output)
94 output)
95 return int(ret)
95 return int(ret)
96 except IOError:
96 except IOError:
97 return 1
97 return 1
98 except ValueError:
98 except ValueError:
99 raise error.ResponseError(
99 raise error.ResponseError(
100 _('putlfile failed (unexpected response):'), ret)
100 _('putlfile failed (unexpected response):'), ret)
101
101
102 def getlfile(self, sha):
102 def getlfile(self, sha):
103 stream = self._callstream("getlfile", sha=sha)
103 stream = self._callstream("getlfile", sha=sha)
104 length = stream.readline()
104 length = stream.readline()
105 try:
105 try:
106 length = int(length)
106 length = int(length)
107 except ValueError:
107 except ValueError:
108 self._abort(error.ResponseError(_("unexpected response:"),
108 self._abort(error.ResponseError(_("unexpected response:"),
109 length))
109 length))
110 return (length, stream)
110 return (length, stream)
111
111
112 def statlfile(self, sha):
112 def statlfile(self, sha):
113 try:
113 try:
114 return int(self._call("statlfile", sha=sha))
114 return int(self._call("statlfile", sha=sha))
115 except (ValueError, urllib2.HTTPError):
115 except (ValueError, urllib2.HTTPError):
116 # if the server returns anything but an integer followed by a
116 # if the server returns anything but an integer followed by a
117 # newline, it's not speaking our language; if we get
117 # newline, it's not speaking our language; if we get
118 # an HTTP error, we can't be sure the largefile is present;
118 # an HTTP error, we can't be sure the largefile is present;
119 # either way, consider it missing
119 # either way, consider it missing
120 return 2
120 return 2
121
121
122 repo.__class__ = lfileswirerepository
122 repo.__class__ = lfileswirerepository
123
123
124 # advertise the largefiles=serve capability
124 # advertise the largefiles=serve capability
125 def capabilities(repo, proto):
125 def capabilities(repo, proto):
126 return capabilities_orig(repo, proto) + ' largefiles=serve'
126 return capabilities_orig(repo, proto) + ' largefiles=serve'
127
127
128 # duplicate what Mercurial's new out-of-band errors mechanism does, because
128 # duplicate what Mercurial's new out-of-band errors mechanism does, because
129 # old and new clients alike handle it well
129 # old and new clients alike handle it well
130 def webproto_refuseclient(self, message):
130 def webproto_refuseclient(self, message):
131 self.req.header([('Content-Type', 'application/hg-error')])
131 self.req.header([('Content-Type', 'application/hg-error')])
132 return message
132 return message
133
133
134 def sshproto_refuseclient(self, message):
134 def sshproto_refuseclient(self, message):
135 self.ui.write_err('%s\n-\n' % message)
135 self.ui.write_err('%s\n-\n' % message)
136 self.fout.write('\n')
136 self.fout.write('\n')
137 self.fout.flush()
137 self.fout.flush()
138
138
139 return ''
139 return ''
140
140
141 def heads(repo, proto):
141 def heads(repo, proto):
142 if lfutil.islfilesrepo(repo):
142 if lfutil.islfilesrepo(repo):
143 try:
144 # Mercurial >= f4522df38c65
145 return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
143 return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
146 except AttributeError:
147 return proto.refuseclient(LARGEFILES_REQUIRED_MSG)
148 return wireproto.heads(repo, proto)
144 return wireproto.heads(repo, proto)
149
145
150 def sshrepo_callstream(self, cmd, **args):
146 def sshrepo_callstream(self, cmd, **args):
151 if cmd == 'heads' and self.capable('largefiles'):
147 if cmd == 'heads' and self.capable('largefiles'):
152 cmd = 'lheads'
148 cmd = 'lheads'
153 if cmd == 'batch' and self.capable('largefiles'):
149 if cmd == 'batch' and self.capable('largefiles'):
154 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
150 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
155 return ssh_oldcallstream(self, cmd, **args)
151 return ssh_oldcallstream(self, cmd, **args)
156
152
157 def httprepo_callstream(self, cmd, **args):
153 def httprepo_callstream(self, cmd, **args):
158 if cmd == 'heads' and self.capable('largefiles'):
154 if cmd == 'heads' and self.capable('largefiles'):
159 cmd = 'lheads'
155 cmd = 'lheads'
160 if cmd == 'batch' and self.capable('largefiles'):
156 if cmd == 'batch' and self.capable('largefiles'):
161 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
157 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
162 return http_oldcallstream(self, cmd, **args)
158 return http_oldcallstream(self, cmd, **args)
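The *_callstream wrappers above reroute 'heads' to 'lheads', both for direct calls and inside batched requests, where the individual commands travel as one joined string. A minimal sketch of that textual rewrite; the sample batch payload is hypothetical.

    def rewrite_batch_cmds(cmds):
        # same substitution the wrappers apply to args['cmds']
        return cmds.replace('heads ', 'lheads ')

    assert (rewrite_batch_cmds('heads ;known nodes=abc')
            == 'lheads ;known nodes=abc')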
@@ -1,411 +1,397 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import types
11 import types
12 import os
12 import os
13 import re
13 import re
14
14
15 from mercurial import context, error, manifest, match as match_, \
15 from mercurial import context, error, manifest, match as match_, \
16 node, util
16 node, util
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 import lfcommands
19 import lfcommands
20 import proto
20 import proto
21 import lfutil
21 import lfutil
22
22
23 def reposetup(ui, repo):
23 def reposetup(ui, repo):
24 # wire repositories should be given new wireproto functions but not the
24 # wire repositories should be given new wireproto functions but not the
25 # other largefiles modifications
25 # other largefiles modifications
26 if not repo.local():
26 if not repo.local():
27 return proto.wirereposetup(ui, repo)
27 return proto.wirereposetup(ui, repo)
28
28
29 for name in ('status', 'commitctx', 'commit', 'push'):
29 for name in ('status', 'commitctx', 'commit', 'push'):
30 method = getattr(repo, name)
30 method = getattr(repo, name)
31 #if not (isinstance(method, types.MethodType) and
31 #if not (isinstance(method, types.MethodType) and
32 # method.im_func is repo.__class__.commitctx.im_func):
32 # method.im_func is repo.__class__.commitctx.im_func):
33 if isinstance(method, types.FunctionType) and method.func_name == \
33 if isinstance(method, types.FunctionType) and method.func_name == \
34 'wrap':
34 'wrap':
35 ui.warn(_('largefiles: repo method %r appears to have already been'
35 ui.warn(_('largefiles: repo method %r appears to have already been'
36 ' wrapped by another extension: '
36 ' wrapped by another extension: '
37 'largefiles may behave incorrectly\n')
37 'largefiles may behave incorrectly\n')
38 % name)
38 % name)
39
39
40 class lfiles_repo(repo.__class__):
40 class lfiles_repo(repo.__class__):
41 lfstatus = False
41 lfstatus = False
42 def status_nolfiles(self, *args, **kwargs):
42 def status_nolfiles(self, *args, **kwargs):
43 return super(lfiles_repo, self).status(*args, **kwargs)
43 return super(lfiles_repo, self).status(*args, **kwargs)
44
44
45 # When lfstatus is set, return a context that gives the names of lfiles
45 # When lfstatus is set, return a context that gives the names of lfiles
46 # instead of their corresponding standins and identifies the lfiles as
46 # instead of their corresponding standins and identifies the lfiles as
47 # always binary, regardless of their actual contents.
47 # always binary, regardless of their actual contents.
48 def __getitem__(self, changeid):
48 def __getitem__(self, changeid):
49 ctx = super(lfiles_repo, self).__getitem__(changeid)
49 ctx = super(lfiles_repo, self).__getitem__(changeid)
50 if self.lfstatus:
50 if self.lfstatus:
51 class lfiles_manifestdict(manifest.manifestdict):
51 class lfiles_manifestdict(manifest.manifestdict):
52 def __contains__(self, filename):
52 def __contains__(self, filename):
53 if super(lfiles_manifestdict,
53 if super(lfiles_manifestdict,
54 self).__contains__(filename):
54 self).__contains__(filename):
55 return True
55 return True
56 return super(lfiles_manifestdict,
56 return super(lfiles_manifestdict,
57 self).__contains__(lfutil.shortname+'/' + filename)
57 self).__contains__(lfutil.shortname+'/' + filename)
58 class lfiles_ctx(ctx.__class__):
58 class lfiles_ctx(ctx.__class__):
59 def files(self):
59 def files(self):
60 filenames = super(lfiles_ctx, self).files()
60 filenames = super(lfiles_ctx, self).files()
61 return [re.sub('^\\'+lfutil.shortname+'/', '',
61 return [re.sub('^\\'+lfutil.shortname+'/', '',
62 filename) for filename in filenames]
62 filename) for filename in filenames]
63 def manifest(self):
63 def manifest(self):
64 man1 = super(lfiles_ctx, self).manifest()
64 man1 = super(lfiles_ctx, self).manifest()
65 man1.__class__ = lfiles_manifestdict
65 man1.__class__ = lfiles_manifestdict
66 return man1
66 return man1
67 def filectx(self, path, fileid=None, filelog=None):
67 def filectx(self, path, fileid=None, filelog=None):
68 try:
68 try:
69 result = super(lfiles_ctx, self).filectx(path,
69 result = super(lfiles_ctx, self).filectx(path,
70 fileid, filelog)
70 fileid, filelog)
71 except error.LookupError:
71 except error.LookupError:
72 # Adding a null character will cause Mercurial to
72 # Adding a null character will cause Mercurial to
73 # identify this as a binary file.
73 # identify this as a binary file.
74 result = super(lfiles_ctx, self).filectx(
74 result = super(lfiles_ctx, self).filectx(
75 lfutil.shortname + '/' + path, fileid,
75 lfutil.shortname + '/' + path, fileid,
76 filelog)
76 filelog)
77 olddata = result.data
77 olddata = result.data
78 result.data = lambda: olddata() + '\0'
78 result.data = lambda: olddata() + '\0'
79 return result
79 return result
80 ctx.__class__ = lfiles_ctx
80 ctx.__class__ = lfiles_ctx
81 return ctx
81 return ctx
82
82
83 # Figure out the status of big files and insert them into the
83 # Figure out the status of big files and insert them into the
84 # appropriate list in the result. Also removes standin files from
84 # appropriate list in the result. Also removes standin files from
85 # the listing. This function reverts to the original status if
85 # the listing. This function reverts to the original status if
86 # self.lfstatus is False
86 # self.lfstatus is False
87 def status(self, node1='.', node2=None, match=None, ignored=False,
87 def status(self, node1='.', node2=None, match=None, ignored=False,
88 clean=False, unknown=False, listsubrepos=False):
88 clean=False, unknown=False, listsubrepos=False):
89 listignored, listclean, listunknown = ignored, clean, unknown
89 listignored, listclean, listunknown = ignored, clean, unknown
90 if not self.lfstatus:
90 if not self.lfstatus:
91 try:
91 try:
92 return super(lfiles_repo, self).status(node1, node2, match,
92 return super(lfiles_repo, self).status(node1, node2, match,
93 listignored, listclean, listunknown, listsubrepos)
93 listignored, listclean, listunknown, listsubrepos)
94 except TypeError:
94 except TypeError:
95 return super(lfiles_repo, self).status(node1, node2, match,
95 return super(lfiles_repo, self).status(node1, node2, match,
96 listignored, listclean, listunknown)
96 listignored, listclean, listunknown)
97 else:
97 else:
98 # some calls in this function rely on the old version of status
98 # some calls in this function rely on the old version of status
99 self.lfstatus = False
99 self.lfstatus = False
100 if isinstance(node1, context.changectx):
100 if isinstance(node1, context.changectx):
101 ctx1 = node1
101 ctx1 = node1
102 else:
102 else:
103 ctx1 = repo[node1]
103 ctx1 = repo[node1]
104 if isinstance(node2, context.changectx):
104 if isinstance(node2, context.changectx):
105 ctx2 = node2
105 ctx2 = node2
106 else:
106 else:
107 ctx2 = repo[node2]
107 ctx2 = repo[node2]
108 working = ctx2.rev() is None
108 working = ctx2.rev() is None
109 parentworking = working and ctx1 == self['.']
109 parentworking = working and ctx1 == self['.']
110
110
111 def inctx(file, ctx):
111 def inctx(file, ctx):
112 try:
112 try:
113 if ctx.rev() is None:
113 if ctx.rev() is None:
114 return file in ctx.manifest()
114 return file in ctx.manifest()
115 ctx[file]
115 ctx[file]
116 return True
116 return True
117 except KeyError:
117 except KeyError:
118 return False
118 return False
119
119
120 # create a copy of match that matches standins instead of
120 # create a copy of match that matches standins instead of
121 # largefiles; if no matcher was passed in, it is the always matcher,
121 # largefiles; if no matcher was passed in, it is the always matcher,
122 # so overwrite that
122 # so overwrite that
123 if match is None:
123 if match is None:
124 match = match_.always(self.root, self.getcwd())
124 match = match_.always(self.root, self.getcwd())
125
125
126 def tostandin(file):
126 def tostandin(file):
127 if inctx(lfutil.standin(file), ctx2):
127 if inctx(lfutil.standin(file), ctx2):
128 return lfutil.standin(file)
128 return lfutil.standin(file)
129 return file
129 return file
130
130
131 m = copy.copy(match)
131 m = copy.copy(match)
132 m._files = [tostandin(f) for f in m._files]
132 m._files = [tostandin(f) for f in m._files]
133
133
134 # get ignored, clean and unknown, but remove them later if they
134 # get ignored, clean and unknown, but remove them later if they
135 # were not asked for
135 # were not asked for
136 try:
136 try:
137 result = super(lfiles_repo, self).status(node1, node2, m,
137 result = super(lfiles_repo, self).status(node1, node2, m,
138 True, True, True, listsubrepos)
138 True, True, True, listsubrepos)
139 except TypeError:
139 except TypeError:
140 result = super(lfiles_repo, self).status(node1, node2, m,
140 result = super(lfiles_repo, self).status(node1, node2, m,
141 True, True, True)
141 True, True, True)
142 if working:
142 if working:
143 # Hold the wlock while we read lfiles and update the
143 # Hold the wlock while we read lfiles and update the
144 # lfdirstate
144 # lfdirstate
145 wlock = repo.wlock()
145 wlock = repo.wlock()
146 try:
146 try:
147 # Any non lfiles that were explicitly listed must be
147 # Any non lfiles that were explicitly listed must be
148 # taken out or lfdirstate.status will report an error.
148 # taken out or lfdirstate.status will report an error.
149 # The status of these files was already computed using
149 # The status of these files was already computed using
150 # super's status.
150 # super's status.
151 lfdirstate = lfutil.openlfdirstate(ui, self)
151 lfdirstate = lfutil.openlfdirstate(ui, self)
152 match._files = [f for f in match._files if f in
152 match._files = [f for f in match._files if f in
153 lfdirstate]
153 lfdirstate]
154 s = lfdirstate.status(match, [], listignored,
154 s = lfdirstate.status(match, [], listignored,
155 listclean, listunknown)
155 listclean, listunknown)
156 (unsure, modified, added, removed, missing, unknown,
156 (unsure, modified, added, removed, missing, unknown,
157 ignored, clean) = s
157 ignored, clean) = s
158 if parentworking:
158 if parentworking:
159 for lfile in unsure:
159 for lfile in unsure:
160 if ctx1[lfutil.standin(lfile)].data().strip() \
160 if ctx1[lfutil.standin(lfile)].data().strip() \
161 != lfutil.hashfile(self.wjoin(lfile)):
161 != lfutil.hashfile(self.wjoin(lfile)):
162 modified.append(lfile)
162 modified.append(lfile)
163 else:
163 else:
164 clean.append(lfile)
164 clean.append(lfile)
165 lfdirstate.normal(lfile)
165 lfdirstate.normal(lfile)
166 lfdirstate.write()
166 lfdirstate.write()
167 else:
167 else:
168 tocheck = unsure + modified + added + clean
168 tocheck = unsure + modified + added + clean
169 modified, added, clean = [], [], []
169 modified, added, clean = [], [], []
170
170
171 for lfile in tocheck:
171 for lfile in tocheck:
172 standin = lfutil.standin(lfile)
172 standin = lfutil.standin(lfile)
173 if inctx(standin, ctx1):
173 if inctx(standin, ctx1):
174 if ctx1[standin].data().strip() != \
174 if ctx1[standin].data().strip() != \
175 lfutil.hashfile(self.wjoin(lfile)):
175 lfutil.hashfile(self.wjoin(lfile)):
176 modified.append(lfile)
176 modified.append(lfile)
177 else:
177 else:
178 clean.append(lfile)
178 clean.append(lfile)
179 else:
179 else:
180 added.append(lfile)
180 added.append(lfile)
181 finally:
181 finally:
182 wlock.release()
182 wlock.release()
183
183
184 for standin in ctx1.manifest():
184 for standin in ctx1.manifest():
185 if not lfutil.isstandin(standin):
185 if not lfutil.isstandin(standin):
186 continue
186 continue
187 lfile = lfutil.splitstandin(standin)
187 lfile = lfutil.splitstandin(standin)
188 if not match(lfile):
188 if not match(lfile):
189 continue
189 continue
190 if lfile not in lfdirstate:
190 if lfile not in lfdirstate:
191 removed.append(lfile)
191 removed.append(lfile)
192 # Handle unknown and ignored differently
192 # Handle unknown and ignored differently
193 lfiles = (modified, added, removed, missing, [], [], clean)
193 lfiles = (modified, added, removed, missing, [], [], clean)
194 result = list(result)
194 result = list(result)
195 # Unknown files
195 # Unknown files
196 result[4] = [f for f in unknown if repo.dirstate[f] == '?'\
196 result[4] = [f for f in unknown if repo.dirstate[f] == '?'\
197 and not lfutil.isstandin(f)]
197 and not lfutil.isstandin(f)]
198 # Ignored files must be ignored by both the dirstate and
198 # Ignored files must be ignored by both the dirstate and
199 # lfdirstate
199 # lfdirstate
200 result[5] = set(ignored).intersection(set(result[5]))
200 result[5] = set(ignored).intersection(set(result[5]))
201 # combine normal files and lfiles
201 # combine normal files and lfiles
202 normals = [[fn for fn in filelist if not \
202 normals = [[fn for fn in filelist if not \
203 lfutil.isstandin(fn)] for filelist in result]
203 lfutil.isstandin(fn)] for filelist in result]
204 result = [sorted(list1 + list2) for (list1, list2) in \
204 result = [sorted(list1 + list2) for (list1, list2) in \
205 zip(normals, lfiles)]
205 zip(normals, lfiles)]
206 else:
206 else:
207 def toname(f):
207 def toname(f):
208 if lfutil.isstandin(f):
208 if lfutil.isstandin(f):
209 return lfutil.splitstandin(f)
209 return lfutil.splitstandin(f)
210 return f
210 return f
211 result = [[toname(f) for f in items] for items in result]
211 result = [[toname(f) for f in items] for items in result]
212
212
213 if not listunknown:
213 if not listunknown:
214 result[4] = []
214 result[4] = []
215 if not listignored:
215 if not listignored:
216 result[5] = []
216 result[5] = []
217 if not listclean:
217 if not listclean:
218 result[6] = []
218 result[6] = []
219 self.lfstatus = True
219 self.lfstatus = True
220 return result
220 return result
221
221
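At the end of the lfstatus branch above, the plain-repo status lists are stripped of standins and merged, position by position, with the largefile lists (modified, added, removed, deleted, unknown, ignored, clean). A standalone sketch of that merge, using '.hglf/' as an example standin prefix; the helper names are made up.

    def merge_status(result, lfiles, isstandin):
        # drop standins from each plain list, then splice in the lfile lists
        normals = [[f for f in filelist if not isstandin(f)]
                   for filelist in result]
        return [sorted(a + b) for a, b in zip(normals, lfiles)]

    isstandin = lambda f: f.startswith('.hglf/')
    plain = [['x.c', '.hglf/big.bin'], [], [], [], [], [], []]
    large = [['big.bin'], [], [], [], [], [], []]
    assert merge_status(plain, large, isstandin)[0] == ['big.bin', 'x.c']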
222 # This call happens after a commit has occurred. Copy all of the lfiles
222 # This call happens after a commit has occurred. Copy all of the lfiles
223 # into the cache
223 # into the cache
224 def commitctx(self, *args, **kwargs):
224 def commitctx(self, *args, **kwargs):
225 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
225 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
226 ctx = self[node]
226 ctx = self[node]
227 for filename in ctx.files():
227 for filename in ctx.files():
228 if lfutil.isstandin(filename) and filename in ctx.manifest():
228 if lfutil.isstandin(filename) and filename in ctx.manifest():
229 realfile = lfutil.splitstandin(filename)
229 realfile = lfutil.splitstandin(filename)
230 lfutil.copytocache(self, ctx.node(), realfile)
230 lfutil.copytocache(self, ctx.node(), realfile)
231
231
232 return node
232 return node
233
233
234 # This call happens before a commit has occurred. The lfile standins
234 # This call happens before a commit has occurred. The lfile standins
235 # have not had their contents updated (to reflect the hash of their
235 # have not had their contents updated (to reflect the hash of their
236 # lfile). Do that here.
236 # lfile). Do that here.
237 def commit(self, text="", user=None, date=None, match=None,
237 def commit(self, text="", user=None, date=None, match=None,
238 force=False, editor=False, extra={}):
238 force=False, editor=False, extra={}):
239 orig = super(lfiles_repo, self).commit
239 orig = super(lfiles_repo, self).commit
240
240
241 wlock = repo.wlock()
241 wlock = repo.wlock()
242 try:
242 try:
243 if getattr(repo, "_isrebasing", False):
243 if getattr(repo, "_isrebasing", False):
244 # We have to take the time to pull down the new lfiles now.
244 # We have to take the time to pull down the new lfiles now.
245 # Otherwise if we are rebasing, any lfiles that were
245 # Otherwise if we are rebasing, any lfiles that were
246 # modified in the changesets we are rebasing on top of get
246 # modified in the changesets we are rebasing on top of get
247 # overwritten either by the rebase or in the first commit
247 # overwritten either by the rebase or in the first commit
248 # after the rebase.
248 # after the rebase.
249 lfcommands.updatelfiles(repo.ui, repo)
249 lfcommands.updatelfiles(repo.ui, repo)
250 # Case 1: user calls commit with no specific files or
250 # Case 1: user calls commit with no specific files or
251 # include/exclude patterns: refresh and commit everything.
251 # include/exclude patterns: refresh and commit everything.
252 if (match is None) or (not match.anypats() and not \
252 if (match is None) or (not match.anypats() and not \
253 match.files()):
253 match.files()):
254 lfiles = lfutil.listlfiles(self)
254 lfiles = lfutil.listlfiles(self)
255 lfdirstate = lfutil.openlfdirstate(ui, self)
255 lfdirstate = lfutil.openlfdirstate(ui, self)
256 # this only loops through lfiles that exist (not
256 # this only loops through lfiles that exist (not
257 # removed/renamed)
257 # removed/renamed)
258 for lfile in lfiles:
258 for lfile in lfiles:
259 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
259 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
260 # this handles the case where a rebase is being
260 # this handles the case where a rebase is being
261 # performed and the working copy is not updated
261 # performed and the working copy is not updated
262 # yet.
262 # yet.
263 if os.path.exists(self.wjoin(lfile)):
263 if os.path.exists(self.wjoin(lfile)):
264 lfutil.updatestandin(self,
264 lfutil.updatestandin(self,
265 lfutil.standin(lfile))
265 lfutil.standin(lfile))
266 lfdirstate.normal(lfile)
266 lfdirstate.normal(lfile)
267 for lfile in lfdirstate:
267 for lfile in lfdirstate:
268 if not os.path.exists(
268 if not os.path.exists(
269 repo.wjoin(lfutil.standin(lfile))):
269 repo.wjoin(lfutil.standin(lfile))):
270 try:
271 # Mercurial >= 1.9
272 lfdirstate.drop(lfile)
270 lfdirstate.drop(lfile)
273 except AttributeError:
274 # Mercurial <= 1.8
275 lfdirstate.forget(lfile)
276 lfdirstate.write()
271 lfdirstate.write()
277
272
278 return orig(text=text, user=user, date=date, match=match,
273 return orig(text=text, user=user, date=date, match=match,
279 force=force, editor=editor, extra=extra)
274 force=force, editor=editor, extra=extra)
280
275
281 for file in match.files():
276 for file in match.files():
282 if lfutil.isstandin(file):
277 if lfutil.isstandin(file):
283 raise util.Abort(
278 raise util.Abort(
284 "Don't commit largefile standin. Commit largefile.")
279 "Don't commit largefile standin. Commit largefile.")
285
280
286 # Case 2: user calls commit with specified patterns: refresh
281 # Case 2: user calls commit with specified patterns: refresh
287 # any matching big files.
282 # any matching big files.
288 smatcher = lfutil.composestandinmatcher(self, match)
283 smatcher = lfutil.composestandinmatcher(self, match)
289 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
284 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
290
285
291 # No matching big files: get out of the way and pass control to
286 # No matching big files: get out of the way and pass control to
292 # the usual commit() method.
287 # the usual commit() method.
293 if not standins:
288 if not standins:
294 return orig(text=text, user=user, date=date, match=match,
289 return orig(text=text, user=user, date=date, match=match,
295 force=force, editor=editor, extra=extra)
290 force=force, editor=editor, extra=extra)
296
291
297 # Refresh all matching big files. It's possible that the
292 # Refresh all matching big files. It's possible that the
298 # commit will end up failing, in which case the big files will
293 # commit will end up failing, in which case the big files will
299 # stay refreshed. No harm done: the user modified them and
294 # stay refreshed. No harm done: the user modified them and
300 # asked to commit them, so sooner or later we're going to
295 # asked to commit them, so sooner or later we're going to
301 # refresh the standins. Might as well leave them refreshed.
296 # refresh the standins. Might as well leave them refreshed.
302 lfdirstate = lfutil.openlfdirstate(ui, self)
297 lfdirstate = lfutil.openlfdirstate(ui, self)
303 for standin in standins:
298 for standin in standins:
304 lfile = lfutil.splitstandin(standin)
299 lfile = lfutil.splitstandin(standin)
305 if lfdirstate[lfile] != 'r':
300 if lfdirstate[lfile] != 'r':
306 lfutil.updatestandin(self, standin)
301 lfutil.updatestandin(self, standin)
307 lfdirstate.normal(lfile)
302 lfdirstate.normal(lfile)
308 else:
303 else:
309 try:
310 # Mercurial >= 1.9
311 lfdirstate.drop(lfile)
304 lfdirstate.drop(lfile)
312 except AttributeError:
313 # Mercurial <= 1.8
314 lfdirstate.forget(lfile)
315 lfdirstate.write()
305 lfdirstate.write()
316
306
317 # Cook up a new matcher that only matches regular files or
307 # Cook up a new matcher that only matches regular files or
318 # standins corresponding to the big files requested by the
308 # standins corresponding to the big files requested by the
319 # user. Have to modify _files to prevent commit() from
309 # user. Have to modify _files to prevent commit() from
320 # complaining "not tracked" for big files.
310 # complaining "not tracked" for big files.
321 lfiles = lfutil.listlfiles(repo)
311 lfiles = lfutil.listlfiles(repo)
322 match = copy.copy(match)
312 match = copy.copy(match)
323 orig_matchfn = match.matchfn
313 orig_matchfn = match.matchfn
324
314
325 # Check both the list of lfiles and the list of standins
315 # Check both the list of lfiles and the list of standins
326 # because if a lfile was removed, it won't be in the list of
316 # because if a lfile was removed, it won't be in the list of
327 # lfiles at this point
317 # lfiles at this point
328 match._files += sorted(standins)
318 match._files += sorted(standins)
329
319
330 actualfiles = []
320 actualfiles = []
331 for f in match._files:
321 for f in match._files:
332 fstandin = lfutil.standin(f)
322 fstandin = lfutil.standin(f)
333
323
334 # Ignore known lfiles and standins
324 # Ignore known lfiles and standins
335 if f in lfiles or fstandin in standins:
325 if f in lfiles or fstandin in standins:
336 continue
326 continue
337
327
338 # Append directory separator to avoid collisions
328 # Append directory separator to avoid collisions
339 if not fstandin.endswith(os.sep):
329 if not fstandin.endswith(os.sep):
340 fstandin += os.sep
330 fstandin += os.sep
341
331
342 # Prevalidate matching standin directories
332 # Prevalidate matching standin directories
343 if lfutil.any_(st for st in match._files if \
333 if lfutil.any_(st for st in match._files if \
344 st.startswith(fstandin)):
334 st.startswith(fstandin)):
345 continue
335 continue
346 actualfiles.append(f)
336 actualfiles.append(f)
347 match._files = actualfiles
337 match._files = actualfiles
348
338
349 def matchfn(f):
339 def matchfn(f):
350 if orig_matchfn(f):
340 if orig_matchfn(f):
351 return f not in lfiles
341 return f not in lfiles
352 else:
342 else:
353 return f in standins
343 return f in standins
354
344
355 match.matchfn = matchfn
345 match.matchfn = matchfn
356 return orig(text=text, user=user, date=date, match=match,
346 return orig(text=text, user=user, date=date, match=match,
357 force=force, editor=editor, extra=extra)
347 force=force, editor=editor, extra=extra)
358 finally:
348 finally:
359 wlock.release()
349 wlock.release()
360
350
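Case 2 of commit() builds its matcher by wrapping the original matchfn so that the requested largefiles themselves are dropped and their standins accepted instead. A plain-closure sketch of that wrapper pattern, with no Mercurial match objects involved (names are illustrative):

    def wrap_matchfn(orig_matchfn, lfiles, standins):
        def matchfn(f):
            if orig_matchfn(f):
                return f not in lfiles       # commit the standin, not the lfile
            return f in standins             # accept standins the user implied
        return matchfn

    orig = lambda f: f in ('normal.c', 'big.bin')
    m = wrap_matchfn(orig, lfiles={'big.bin'}, standins={'.hglf/big.bin'})
    assert m('normal.c') and not m('big.bin') and m('.hglf/big.bin')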
361 def push(self, remote, force=False, revs=None, newbranch=False):
351 def push(self, remote, force=False, revs=None, newbranch=False):
362 o = lfutil.findoutgoing(repo, remote, force)
352 o = lfutil.findoutgoing(repo, remote, force)
363 if o:
353 if o:
364 toupload = set()
354 toupload = set()
365 o = repo.changelog.nodesbetween(o, revs)[0]
355 o = repo.changelog.nodesbetween(o, revs)[0]
366 for n in o:
356 for n in o:
367 parents = [p for p in repo.changelog.parents(n) if p != \
357 parents = [p for p in repo.changelog.parents(n) if p != \
368 node.nullid]
358 node.nullid]
369 ctx = repo[n]
359 ctx = repo[n]
370 files = set(ctx.files())
360 files = set(ctx.files())
371 if len(parents) == 2:
361 if len(parents) == 2:
372 mc = ctx.manifest()
362 mc = ctx.manifest()
373 mp1 = ctx.parents()[0].manifest()
363 mp1 = ctx.parents()[0].manifest()
374 mp2 = ctx.parents()[1].manifest()
364 mp2 = ctx.parents()[1].manifest()
375 for f in mp1:
365 for f in mp1:
376 if f not in mc:
366 if f not in mc:
377 files.add(f)
367 files.add(f)
378 for f in mp2:
368 for f in mp2:
379 if f not in mc:
369 if f not in mc:
380 files.add(f)
370 files.add(f)
381 for f in mc:
371 for f in mc:
382 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
372 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
383 None):
373 None):
384 files.add(f)
374 files.add(f)
385
375
386 toupload = toupload.union(set([ctx[f].data().strip() for f\
376 toupload = toupload.union(set([ctx[f].data().strip() for f\
387 in files if lfutil.isstandin(f) and f in ctx]))
377 in files if lfutil.isstandin(f) and f in ctx]))
388 lfcommands.uploadlfiles(ui, self, remote, toupload)
378 lfcommands.uploadlfiles(ui, self, remote, toupload)
389 # Mercurial >= 1.6 takes the newbranch argument, try that first.
390 try:
391 return super(lfiles_repo, self).push(remote, force, revs,
379 return super(lfiles_repo, self).push(remote, force, revs,
392 newbranch)
380 newbranch)
393 except TypeError:
394 return super(lfiles_repo, self).push(remote, force, revs)
395
381
396 repo.__class__ = lfiles_repo
382 repo.__class__ = lfiles_repo
397
383
398 def checkrequireslfiles(ui, repo, **kwargs):
384 def checkrequireslfiles(ui, repo, **kwargs):
399 if 'largefiles' not in repo.requirements and lfutil.any_(
385 if 'largefiles' not in repo.requirements and lfutil.any_(
400 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
386 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
401 # work around bug in mercurial 1.9 whereby requirements is a list
387 # work around bug in mercurial 1.9 whereby requirements is a list
402 # on newly-cloned repos
388 # on newly-cloned repos
403 repo.requirements = set(repo.requirements)
389 repo.requirements = set(repo.requirements)
404
390
405 repo.requirements |= set(['largefiles'])
391 repo.requirements |= set(['largefiles'])
406 repo._writerequirements()
392 repo._writerequirements()
407
393
408 checkrequireslfiles(ui, repo)
394 checkrequireslfiles(ui, repo)
409
395
410 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
396 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
411 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
397 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)