largefiles: more work on cleaning up comments...
Greg Ward
r15254:dd03d3a9 default
@@ -1,482 +1,482 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import os
11 import os
12 import shutil
12 import shutil
13
13
14 from mercurial import util, match as match_, hg, node, context, error
14 from mercurial import util, match as match_, hg, node, context, error
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 import lfutil
17 import lfutil
18 import basestore
18 import basestore
19
19
20 # -- Commands ----------------------------------------------------------
20 # -- Commands ----------------------------------------------------------
21
21
22 def lfconvert(ui, src, dest, *pats, **opts):
22 def lfconvert(ui, src, dest, *pats, **opts):
23 '''convert a normal repository to a largefiles repository
23 '''convert a normal repository to a largefiles repository
24
24
25 Convert repository SOURCE to a new repository DEST, identical to
25 Convert repository SOURCE to a new repository DEST, identical to
26 SOURCE except that certain files will be converted as largefiles:
26 SOURCE except that certain files will be converted as largefiles:
27 specifically, any file that matches any PATTERN *or* whose size is
27 specifically, any file that matches any PATTERN *or* whose size is
28 above the minimum size threshold is converted as a largefile. The
28 above the minimum size threshold is converted as a largefile. The
29 size used to determine whether or not to track a file as a
29 size used to determine whether or not to track a file as a
30 largefile is the size of the first version of the file. The
30 largefile is the size of the first version of the file. The
31 minimum size can be specified either with --size or in
31 minimum size can be specified either with --size or in
32 configuration as ``largefiles.size``.
32 configuration as ``largefiles.size``.
33
33
34 After running this command you will need to make sure that
34 After running this command you will need to make sure that
35 largefiles is enabled anywhere you intend to push the new
35 largefiles is enabled anywhere you intend to push the new
36 repository.
36 repository.
37
37
38 Use --tonormal to convert largefiles back to normal files; after
38 Use --tonormal to convert largefiles back to normal files; after
39 this, the DEST repository can be used without largefiles at all.'''
39 this, the DEST repository can be used without largefiles at all.'''
40
40
41 if opts['tonormal']:
41 if opts['tonormal']:
42 tolfile = False
42 tolfile = False
43 else:
43 else:
44 tolfile = True
44 tolfile = True
45 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
45 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
46 try:
46 try:
47 rsrc = hg.repository(ui, src)
47 rsrc = hg.repository(ui, src)
48 if not rsrc.local():
48 if not rsrc.local():
49 raise util.Abort(_('%s is not a local Mercurial repo') % src)
49 raise util.Abort(_('%s is not a local Mercurial repo') % src)
50 except error.RepoError, err:
50 except error.RepoError, err:
51 ui.traceback()
51 ui.traceback()
52 raise util.Abort(err.args[0])
52 raise util.Abort(err.args[0])
53 if os.path.exists(dest):
53 if os.path.exists(dest):
54 if not os.path.isdir(dest):
54 if not os.path.isdir(dest):
55 raise util.Abort(_('destination %s already exists') % dest)
55 raise util.Abort(_('destination %s already exists') % dest)
56 elif os.listdir(dest):
56 elif os.listdir(dest):
57 raise util.Abort(_('destination %s is not empty') % dest)
57 raise util.Abort(_('destination %s is not empty') % dest)
58 try:
58 try:
59 ui.status(_('initializing destination %s\n') % dest)
59 ui.status(_('initializing destination %s\n') % dest)
60 rdst = hg.repository(ui, dest, create=True)
60 rdst = hg.repository(ui, dest, create=True)
61 if not rdst.local():
61 if not rdst.local():
62 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
62 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
63 except error.RepoError:
63 except error.RepoError:
64 ui.traceback()
64 ui.traceback()
65 raise util.Abort(_('%s is not a repo') % dest)
65 raise util.Abort(_('%s is not a repo') % dest)
66
66
67 success = False
67 success = False
68 try:
68 try:
69 # Lock destination to prevent modification while it is being converted to.
69 # Lock destination to prevent modification while it is being converted to.
70 # Don't need to lock src because we are just reading from its history
70 # Don't need to lock src because we are just reading from its history
71 # which can't change.
71 # which can't change.
72 dst_lock = rdst.lock()
72 dst_lock = rdst.lock()
73
73
74 # Get a list of all changesets in the source. The easy way to do this
74 # Get a list of all changesets in the source. The easy way to do this
75 # is to simply walk the changelog, using changelog.nodesbetween().
75 # is to simply walk the changelog, using changelog.nodesbetween().
76 # Take a look at mercurial/revlog.py:639 for more details.
76 # Take a look at mercurial/revlog.py:639 for more details.
77 # Use a generator instead of a list to decrease memory usage
77 # Use a generator instead of a list to decrease memory usage
78 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
78 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
79 rsrc.heads())[0])
79 rsrc.heads())[0])
80 revmap = {node.nullid: node.nullid}
80 revmap = {node.nullid: node.nullid}
81 if tolfile:
81 if tolfile:
82 lfiles = set()
82 lfiles = set()
83 normalfiles = set()
83 normalfiles = set()
84 if not pats:
84 if not pats:
85 pats = ui.config(lfutil.longname, 'patterns', default=())
85 pats = ui.config(lfutil.longname, 'patterns', default=())
86 if pats:
86 if pats:
87 pats = pats.split(' ')
87 pats = pats.split(' ')
88 if pats:
88 if pats:
89 matcher = match_.match(rsrc.root, '', list(pats))
89 matcher = match_.match(rsrc.root, '', list(pats))
90 else:
90 else:
91 matcher = None
91 matcher = None
92
92
93 lfiletohash = {}
93 lfiletohash = {}
94 for ctx in ctxs:
94 for ctx in ctxs:
95 ui.progress(_('converting revisions'), ctx.rev(),
95 ui.progress(_('converting revisions'), ctx.rev(),
96 unit=_('revision'), total=rsrc['tip'].rev())
96 unit=_('revision'), total=rsrc['tip'].rev())
97 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
97 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
98 lfiles, normalfiles, matcher, size, lfiletohash)
98 lfiles, normalfiles, matcher, size, lfiletohash)
99 ui.progress(_('converting revisions'), None)
99 ui.progress(_('converting revisions'), None)
100
100
101 if os.path.exists(rdst.wjoin(lfutil.shortname)):
101 if os.path.exists(rdst.wjoin(lfutil.shortname)):
102 shutil.rmtree(rdst.wjoin(lfutil.shortname))
102 shutil.rmtree(rdst.wjoin(lfutil.shortname))
103
103
104 for f in lfiletohash.keys():
104 for f in lfiletohash.keys():
105 if os.path.isfile(rdst.wjoin(f)):
105 if os.path.isfile(rdst.wjoin(f)):
106 os.unlink(rdst.wjoin(f))
106 os.unlink(rdst.wjoin(f))
107 try:
107 try:
108 os.removedirs(os.path.dirname(rdst.wjoin(f)))
108 os.removedirs(os.path.dirname(rdst.wjoin(f)))
109 except OSError:
109 except OSError:
110 pass
110 pass
111
111
112 else:
112 else:
113 for ctx in ctxs:
113 for ctx in ctxs:
114 ui.progress(_('converting revisions'), ctx.rev(),
114 ui.progress(_('converting revisions'), ctx.rev(),
115 unit=_('revision'), total=rsrc['tip'].rev())
115 unit=_('revision'), total=rsrc['tip'].rev())
116 _addchangeset(ui, rsrc, rdst, ctx, revmap)
116 _addchangeset(ui, rsrc, rdst, ctx, revmap)
117
117
118 ui.progress(_('converting revisions'), None)
118 ui.progress(_('converting revisions'), None)
119 success = True
119 success = True
120 finally:
120 finally:
121 if not success:
121 if not success:
122 # we failed, remove the new directory
122 # we failed, remove the new directory
123 shutil.rmtree(rdst.root)
123 shutil.rmtree(rdst.root)
124 dst_lock.release()
124 dst_lock.release()
125
125
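
The conversion loop above hinges on revmap, which translates source changeset nodes into the nodes of their already-converted counterparts so that parent links stay intact in the destination. A minimal, self-contained sketch of that idea follows, with plain strings standing in for Mercurial nodes and a stub in place of rdst.commitctx(); convert() and source_log are illustrative names, not part of the extension.

    NULLID = '0' * 40

    def convert(source_log):
        # source_log: (node, parentnodes) pairs in topological order,
        # mirroring the changelog.nodesbetween() walk used by lfconvert
        revmap = {NULLID: NULLID}
        for srcnode, srcparents in source_log:
            dstparents = [revmap[p] for p in srcparents]
            while len(dstparents) < 2:
                dstparents.append(NULLID)
            dstnode = 'converted:' + srcnode   # stands in for rdst.commitctx()
            revmap[srcnode] = dstnode
        return revmap

    print(convert([('a' * 40, [NULLID]), ('b' * 40, ['a' * 40])]))
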
126 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
126 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
127 # Convert src parents to dst parents
127 # Convert src parents to dst parents
128 parents = []
128 parents = []
129 for p in ctx.parents():
129 for p in ctx.parents():
130 parents.append(revmap[p.node()])
130 parents.append(revmap[p.node()])
131 while len(parents) < 2:
131 while len(parents) < 2:
132 parents.append(node.nullid)
132 parents.append(node.nullid)
133
133
134 # Generate list of changed files
134 # Generate list of changed files
135 files = set(ctx.files())
135 files = set(ctx.files())
136 if node.nullid not in parents:
136 if node.nullid not in parents:
137 mc = ctx.manifest()
137 mc = ctx.manifest()
138 mp1 = ctx.parents()[0].manifest()
138 mp1 = ctx.parents()[0].manifest()
139 mp2 = ctx.parents()[1].manifest()
139 mp2 = ctx.parents()[1].manifest()
140 files |= (set(mp1) | set(mp2)) - set(mc)
140 files |= (set(mp1) | set(mp2)) - set(mc)
141 for f in mc:
141 for f in mc:
142 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
142 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
143 files.add(f)
143 files.add(f)
144
144
145 def getfilectx(repo, memctx, f):
145 def getfilectx(repo, memctx, f):
146 if lfutil.standin(f) in files:
146 if lfutil.standin(f) in files:
147 # if the file isn't in the manifest then it was removed
147 # if the file isn't in the manifest then it was removed
148 # or renamed, raise IOError to indicate this
148 # or renamed, raise IOError to indicate this
149 try:
149 try:
150 fctx = ctx.filectx(lfutil.standin(f))
150 fctx = ctx.filectx(lfutil.standin(f))
151 except error.LookupError:
151 except error.LookupError:
152 raise IOError()
152 raise IOError()
153 renamed = fctx.renamed()
153 renamed = fctx.renamed()
154 if renamed:
154 if renamed:
155 renamed = lfutil.splitstandin(renamed[0])
155 renamed = lfutil.splitstandin(renamed[0])
156
156
157 hash = fctx.data().strip()
157 hash = fctx.data().strip()
158 path = lfutil.findfile(rsrc, hash)
158 path = lfutil.findfile(rsrc, hash)
159 ### TODO: What if the file is not cached?
159 ### TODO: What if the file is not cached?
160 data = ''
160 data = ''
161 fd = None
161 fd = None
162 try:
162 try:
163 fd = open(path, 'rb')
163 fd = open(path, 'rb')
164 data = fd.read()
164 data = fd.read()
165 finally:
165 finally:
166 if fd:
166 if fd:
167 fd.close()
167 fd.close()
168 return context.memfilectx(f, data, 'l' in fctx.flags(),
168 return context.memfilectx(f, data, 'l' in fctx.flags(),
169 'x' in fctx.flags(), renamed)
169 'x' in fctx.flags(), renamed)
170 else:
170 else:
171 try:
171 try:
172 fctx = ctx.filectx(f)
172 fctx = ctx.filectx(f)
173 except error.LookupError:
173 except error.LookupError:
174 raise IOError()
174 raise IOError()
175 renamed = fctx.renamed()
175 renamed = fctx.renamed()
176 if renamed:
176 if renamed:
177 renamed = renamed[0]
177 renamed = renamed[0]
178 data = fctx.data()
178 data = fctx.data()
179 if f == '.hgtags':
179 if f == '.hgtags':
180 newdata = []
180 newdata = []
181 for line in data.splitlines():
181 for line in data.splitlines():
182 id, name = line.split(' ', 1)
182 id, name = line.split(' ', 1)
183 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
183 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
184 name))
184 name))
185 data = ''.join(newdata)
185 data = ''.join(newdata)
186 return context.memfilectx(f, data, 'l' in fctx.flags(),
186 return context.memfilectx(f, data, 'l' in fctx.flags(),
187 'x' in fctx.flags(), renamed)
187 'x' in fctx.flags(), renamed)
188
188
189 dstfiles = []
189 dstfiles = []
190 for file in files:
190 for file in files:
191 if lfutil.isstandin(file):
191 if lfutil.isstandin(file):
192 dstfiles.append(lfutil.splitstandin(file))
192 dstfiles.append(lfutil.splitstandin(file))
193 else:
193 else:
194 dstfiles.append(file)
194 dstfiles.append(file)
195 # Commit
195 # Commit
196 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
196 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
197 getfilectx, ctx.user(), ctx.date(), ctx.extra())
197 getfilectx, ctx.user(), ctx.date(), ctx.extra())
198 ret = rdst.commitctx(mctx)
198 ret = rdst.commitctx(mctx)
199 rdst.dirstate.setparents(ret)
199 rdst.dirstate.setparents(ret)
200 revmap[ctx.node()] = rdst.changelog.tip()
200 revmap[ctx.node()] = rdst.changelog.tip()
201
201
202 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
202 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
203 matcher, size, lfiletohash):
203 matcher, size, lfiletohash):
204 # Convert src parents to dst parents
204 # Convert src parents to dst parents
205 parents = []
205 parents = []
206 for p in ctx.parents():
206 for p in ctx.parents():
207 parents.append(revmap[p.node()])
207 parents.append(revmap[p.node()])
208 while len(parents) < 2:
208 while len(parents) < 2:
209 parents.append(node.nullid)
209 parents.append(node.nullid)
210
210
211 # Generate list of changed files
211 # Generate list of changed files
212 files = set(ctx.files())
212 files = set(ctx.files())
213 if node.nullid not in parents:
213 if node.nullid not in parents:
214 mc = ctx.manifest()
214 mc = ctx.manifest()
215 mp1 = ctx.parents()[0].manifest()
215 mp1 = ctx.parents()[0].manifest()
216 mp2 = ctx.parents()[1].manifest()
216 mp2 = ctx.parents()[1].manifest()
217 files |= (set(mp1) | set(mp2)) - set(mc)
217 files |= (set(mp1) | set(mp2)) - set(mc)
218 for f in mc:
218 for f in mc:
219 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
219 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
220 files.add(f)
220 files.add(f)
221
221
222 dstfiles = []
222 dstfiles = []
223 for f in files:
223 for f in files:
224 if f not in lfiles and f not in normalfiles:
224 if f not in lfiles and f not in normalfiles:
225 islfile = _islfile(f, ctx, matcher, size)
225 islfile = _islfile(f, ctx, matcher, size)
226 # If this file was renamed or copied then copy
226 # If this file was renamed or copied then copy
227 # the lfileness of its predecessor
227 # the lfileness of its predecessor
228 if f in ctx.manifest():
228 if f in ctx.manifest():
229 fctx = ctx.filectx(f)
229 fctx = ctx.filectx(f)
230 renamed = fctx.renamed()
230 renamed = fctx.renamed()
231 renamedlfile = renamed and renamed[0] in lfiles
231 renamedlfile = renamed and renamed[0] in lfiles
232 islfile |= renamedlfile
232 islfile |= renamedlfile
233 if 'l' in fctx.flags():
233 if 'l' in fctx.flags():
234 if renamedlfile:
234 if renamedlfile:
235 raise util.Abort(
235 raise util.Abort(
236 _('Renamed/copied largefile %s becomes symlink')
236 _('Renamed/copied largefile %s becomes symlink')
237 % f)
237 % f)
238 islfile = False
238 islfile = False
239 if islfile:
239 if islfile:
240 lfiles.add(f)
240 lfiles.add(f)
241 else:
241 else:
242 normalfiles.add(f)
242 normalfiles.add(f)
243
243
244 if f in lfiles:
244 if f in lfiles:
245 dstfiles.append(lfutil.standin(f))
245 dstfiles.append(lfutil.standin(f))
246 # lfile in manifest if it has not been removed/renamed
246 # largefile in manifest if it has not been removed/renamed
247 if f in ctx.manifest():
247 if f in ctx.manifest():
248 if 'l' in ctx.filectx(f).flags():
248 if 'l' in ctx.filectx(f).flags():
249 if renamed and renamed[0] in lfiles:
249 if renamed and renamed[0] in lfiles:
250 raise util.Abort(_('largefile %s becomes symlink') % f)
250 raise util.Abort(_('largefile %s becomes symlink') % f)
251
251
252 # lfile was modified, update standins
252 # largefile was modified, update standins
253 fullpath = rdst.wjoin(f)
253 fullpath = rdst.wjoin(f)
254 lfutil.createdir(os.path.dirname(fullpath))
254 lfutil.createdir(os.path.dirname(fullpath))
255 m = util.sha1('')
255 m = util.sha1('')
256 m.update(ctx[f].data())
256 m.update(ctx[f].data())
257 hash = m.hexdigest()
257 hash = m.hexdigest()
258 if f not in lfiletohash or lfiletohash[f] != hash:
258 if f not in lfiletohash or lfiletohash[f] != hash:
259 try:
259 try:
260 fd = open(fullpath, 'wb')
260 fd = open(fullpath, 'wb')
261 fd.write(ctx[f].data())
261 fd.write(ctx[f].data())
262 finally:
262 finally:
263 if fd:
263 if fd:
264 fd.close()
264 fd.close()
265 executable = 'x' in ctx[f].flags()
265 executable = 'x' in ctx[f].flags()
266 os.chmod(fullpath, lfutil.getmode(executable))
266 os.chmod(fullpath, lfutil.getmode(executable))
267 lfutil.writestandin(rdst, lfutil.standin(f), hash,
267 lfutil.writestandin(rdst, lfutil.standin(f), hash,
268 executable)
268 executable)
269 lfiletohash[f] = hash
269 lfiletohash[f] = hash
270 else:
270 else:
271 # normal file
271 # normal file
272 dstfiles.append(f)
272 dstfiles.append(f)
273
273
274 def getfilectx(repo, memctx, f):
274 def getfilectx(repo, memctx, f):
275 if lfutil.isstandin(f):
275 if lfutil.isstandin(f):
276 # if the file isn't in the manifest then it was removed
276 # if the file isn't in the manifest then it was removed
277 # or renamed, raise IOError to indicate this
277 # or renamed, raise IOError to indicate this
278 srcfname = lfutil.splitstandin(f)
278 srcfname = lfutil.splitstandin(f)
279 try:
279 try:
280 fctx = ctx.filectx(srcfname)
280 fctx = ctx.filectx(srcfname)
281 except error.LookupError:
281 except error.LookupError:
282 raise IOError()
282 raise IOError()
283 renamed = fctx.renamed()
283 renamed = fctx.renamed()
284 if renamed:
284 if renamed:
285 # standin is always a lfile because lfileness
285 # standin is always a largefile because largefile-ness
286 # doesn't change after rename or copy
286 # doesn't change after rename or copy
287 renamed = lfutil.standin(renamed[0])
287 renamed = lfutil.standin(renamed[0])
288
288
289 return context.memfilectx(f, lfiletohash[srcfname], 'l' in
289 return context.memfilectx(f, lfiletohash[srcfname], 'l' in
290 fctx.flags(), 'x' in fctx.flags(), renamed)
290 fctx.flags(), 'x' in fctx.flags(), renamed)
291 else:
291 else:
292 try:
292 try:
293 fctx = ctx.filectx(f)
293 fctx = ctx.filectx(f)
294 except error.LookupError:
294 except error.LookupError:
295 raise IOError()
295 raise IOError()
296 renamed = fctx.renamed()
296 renamed = fctx.renamed()
297 if renamed:
297 if renamed:
298 renamed = renamed[0]
298 renamed = renamed[0]
299
299
300 data = fctx.data()
300 data = fctx.data()
301 if f == '.hgtags':
301 if f == '.hgtags':
302 newdata = []
302 newdata = []
303 for line in data.splitlines():
303 for line in data.splitlines():
304 id, name = line.split(' ', 1)
304 id, name = line.split(' ', 1)
305 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
305 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
306 name))
306 name))
307 data = ''.join(newdata)
307 data = ''.join(newdata)
308 return context.memfilectx(f, data, 'l' in fctx.flags(),
308 return context.memfilectx(f, data, 'l' in fctx.flags(),
309 'x' in fctx.flags(), renamed)
309 'x' in fctx.flags(), renamed)
310
310
311 # Commit
311 # Commit
312 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
312 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
313 getfilectx, ctx.user(), ctx.date(), ctx.extra())
313 getfilectx, ctx.user(), ctx.date(), ctx.extra())
314 ret = rdst.commitctx(mctx)
314 ret = rdst.commitctx(mctx)
315 rdst.dirstate.setparents(ret)
315 rdst.dirstate.setparents(ret)
316 revmap[ctx.node()] = rdst.changelog.tip()
316 revmap[ctx.node()] = rdst.changelog.tip()
317
317
318 def _islfile(file, ctx, matcher, size):
318 def _islfile(file, ctx, matcher, size):
319 '''Return true if file should be considered a largefile, i.e.
319 '''Return true if file should be considered a largefile, i.e.
320 matcher matches it or it is larger than size.'''
320 matcher matches it or it is larger than size.'''
321 # never store special .hg* files as largefiles
321 # never store special .hg* files as largefiles
322 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
322 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
323 return False
323 return False
324 if matcher and matcher(file):
324 if matcher and matcher(file):
325 return True
325 return True
326 try:
326 try:
327 return ctx.filectx(file).size() >= size * 1024 * 1024
327 return ctx.filectx(file).size() >= size * 1024 * 1024
328 except error.LookupError:
328 except error.LookupError:
329 return False
329 return False
330
330
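
The size test in _islfile works in megabytes, and the tracked .hg* metadata files are always kept normal. A self-contained sketch of the same decision, with a plain byte count in place of a filectx; is_largefile and its arguments are illustrative, not part of the extension.

    def is_largefile(path, filesize, size_mb, matcher=None):
        # mirrors _islfile: .hg* files never qualify, a pattern match wins,
        # otherwise compare the first revision's size against the threshold
        if path in ('.hgtags', '.hgignore', '.hgsigs'):
            return False
        if matcher and matcher(path):
            return True
        return filesize >= size_mb * 1024 * 1024

    print(is_largefile('assets/video.bin', 12 * 1024 * 1024, 10))  # True
    print(is_largefile('.hgtags', 99 * 1024 * 1024, 10))           # False
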
331 def uploadlfiles(ui, rsrc, rdst, files):
331 def uploadlfiles(ui, rsrc, rdst, files):
332 '''upload largefiles to the central store'''
332 '''upload largefiles to the central store'''
333
333
334 # Don't upload locally. All largefiles are in the system wide cache
334 # Don't upload locally. All largefiles are in the system wide cache
335 # so the other repo can just get them from there.
335 # so the other repo can just get them from there.
336 if not files or rdst.local():
336 if not files or rdst.local():
337 return
337 return
338
338
339 store = basestore._openstore(rsrc, rdst, put=True)
339 store = basestore._openstore(rsrc, rdst, put=True)
340
340
341 at = 0
341 at = 0
342 files = filter(lambda h: not store.exists(h), files)
342 files = filter(lambda h: not store.exists(h), files)
343 for hash in files:
343 for hash in files:
344 ui.progress(_('uploading largefiles'), at, unit='largefile',
344 ui.progress(_('uploading largefiles'), at, unit='largefile',
345 total=len(files))
345 total=len(files))
346 source = lfutil.findfile(rsrc, hash)
346 source = lfutil.findfile(rsrc, hash)
347 if not source:
347 if not source:
348 raise util.Abort(_('largefile %s missing from store'
348 raise util.Abort(_('largefile %s missing from store'
349 ' (needs to be uploaded)') % hash)
349 ' (needs to be uploaded)') % hash)
350 # XXX check for errors here
350 # XXX check for errors here
351 store.put(source, hash)
351 store.put(source, hash)
352 at += 1
352 at += 1
353 ui.progress(_('uploading largefiles'), None)
353 ui.progress(_('uploading largefiles'), None)
354
354
355 def verifylfiles(ui, repo, all=False, contents=False):
355 def verifylfiles(ui, repo, all=False, contents=False):
356 '''Verify that every big file revision in the current changeset
356 '''Verify that every big file revision in the current changeset
357 exists in the central store. With --contents, also verify that
357 exists in the central store. With --contents, also verify that
358 the contents of each big file revision are correct (SHA-1 hash
358 the contents of each big file revision are correct (SHA-1 hash
359 matches the revision ID). With --all, check every changeset in
359 matches the revision ID). With --all, check every changeset in
360 this repository.'''
360 this repository.'''
361 if all:
361 if all:
362 # Pass a list to the function rather than an iterator because we know a
362 # Pass a list to the function rather than an iterator because we know a
363 # list will work.
363 # list will work.
364 revs = range(len(repo))
364 revs = range(len(repo))
365 else:
365 else:
366 revs = ['.']
366 revs = ['.']
367
367
368 store = basestore._openstore(repo)
368 store = basestore._openstore(repo)
369 return store.verify(revs, contents=contents)
369 return store.verify(revs, contents=contents)
370
370
371 def cachelfiles(ui, repo, node):
371 def cachelfiles(ui, repo, node):
372 '''cachelfiles ensures that all largefiles needed by the specified revision
372 '''cachelfiles ensures that all largefiles needed by the specified revision
373 are present in the repository's largefile cache.
373 are present in the repository's largefile cache.
374
374
375 returns a tuple (cached, missing). cached is the list of files downloaded
375 returns a tuple (cached, missing). cached is the list of files downloaded
376 by this operation; missing is the list of files that were needed but could
376 by this operation; missing is the list of files that were needed but could
377 not be found.'''
377 not be found.'''
378 lfiles = lfutil.listlfiles(repo, node)
378 lfiles = lfutil.listlfiles(repo, node)
379 toget = []
379 toget = []
380
380
381 for lfile in lfiles:
381 for lfile in lfiles:
382 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
382 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
383 # if it exists and its hash matches, it might have been locally
383 # if it exists and its hash matches, it might have been locally
384 # modified before updating and the user chose 'local'. in this case,
384 # modified before updating and the user chose 'local'. in this case,
385 # it will not be in any store, so don't look for it.
385 # it will not be in any store, so don't look for it.
386 if (not os.path.exists(repo.wjoin(lfile)) \
386 if (not os.path.exists(repo.wjoin(lfile)) \
387 or expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and \
387 or expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and \
388 not lfutil.findfile(repo, expectedhash):
388 not lfutil.findfile(repo, expectedhash):
389 toget.append((lfile, expectedhash))
389 toget.append((lfile, expectedhash))
390
390
391 if toget:
391 if toget:
392 store = basestore._openstore(repo)
392 store = basestore._openstore(repo)
393 ret = store.get(toget)
393 ret = store.get(toget)
394 return ret
394 return ret
395
395
396 return ([], [])
396 return ([], [])
397
397
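
cachelfiles decides whether a largefile must be fetched by comparing the hash recorded in its standin against a hash of the working copy. A self-contained sketch of that comparison, using hashlib directly where the extension would use lfutil.hashfile; the variable names here are illustrative.

    import hashlib

    def hashdata(data):
        return hashlib.sha1(data).hexdigest()

    # the standin tracked in the repository holds only the largefile's SHA-1
    standin_data = hashdata(b'big binary payload') + '\n'
    expectedhash = standin_data.strip()

    working_copy = b'big binary payload'
    needs_fetch = expectedhash != hashdata(working_copy)
    print(needs_fetch)   # False: the working copy already matches the standin
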
398 def updatelfiles(ui, repo, filelist=None, printmessage=True):
398 def updatelfiles(ui, repo, filelist=None, printmessage=True):
399 wlock = repo.wlock()
399 wlock = repo.wlock()
400 try:
400 try:
401 lfdirstate = lfutil.openlfdirstate(ui, repo)
401 lfdirstate = lfutil.openlfdirstate(ui, repo)
402 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
402 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
403
403
404 if filelist is not None:
404 if filelist is not None:
405 lfiles = [f for f in lfiles if f in filelist]
405 lfiles = [f for f in lfiles if f in filelist]
406
406
407 printed = False
407 printed = False
408 if printmessage and lfiles:
408 if printmessage and lfiles:
409 ui.status(_('getting changed largefiles\n'))
409 ui.status(_('getting changed largefiles\n'))
410 printed = True
410 printed = True
411 cachelfiles(ui, repo, '.')
411 cachelfiles(ui, repo, '.')
412
412
413 updated, removed = 0, 0
413 updated, removed = 0, 0
414 for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
414 for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
415 # increment the appropriate counter according to _updatelfile's
415 # increment the appropriate counter according to _updatelfile's
416 # return value
416 # return value
417 updated += i > 0 and i or 0
417 updated += i > 0 and i or 0
418 removed -= i < 0 and i or 0
418 removed -= i < 0 and i or 0
419 if printmessage and (removed or updated) and not printed:
419 if printmessage and (removed or updated) and not printed:
420 ui.status(_('getting changed largefiles\n'))
420 ui.status(_('getting changed largefiles\n'))
421 printed = True
421 printed = True
422
422
423 lfdirstate.write()
423 lfdirstate.write()
424 if printed and printmessage:
424 if printed and printmessage:
425 ui.status(_('%d largefiles updated, %d removed\n') % (updated,
425 ui.status(_('%d largefiles updated, %d removed\n') % (updated,
426 removed))
426 removed))
427 finally:
427 finally:
428 wlock.release()
428 wlock.release()
429
429
430 def _updatelfile(repo, lfdirstate, lfile):
430 def _updatelfile(repo, lfdirstate, lfile):
431 '''updates a single largefile and copies the state of its standin from
431 '''updates a single largefile and copies the state of its standin from
432 the repository's dirstate to its state in the lfdirstate.
432 the repository's dirstate to its state in the lfdirstate.
433
433
434 returns 1 if the file was modified, -1 if the file was removed, 0 if the
434 returns 1 if the file was modified, -1 if the file was removed, 0 if the
435 file was unchanged, and None if the needed largefile was missing from the
435 file was unchanged, and None if the needed largefile was missing from the
436 cache.'''
436 cache.'''
437 ret = 0
437 ret = 0
438 abslfile = repo.wjoin(lfile)
438 abslfile = repo.wjoin(lfile)
439 absstandin = repo.wjoin(lfutil.standin(lfile))
439 absstandin = repo.wjoin(lfutil.standin(lfile))
440 if os.path.exists(absstandin):
440 if os.path.exists(absstandin):
441 if os.path.exists(absstandin+'.orig'):
441 if os.path.exists(absstandin+'.orig'):
442 shutil.copyfile(abslfile, abslfile+'.orig')
442 shutil.copyfile(abslfile, abslfile+'.orig')
443 expecthash = lfutil.readstandin(repo, lfile)
443 expecthash = lfutil.readstandin(repo, lfile)
444 if expecthash != '' and \
444 if expecthash != '' and \
445 (not os.path.exists(abslfile) or \
445 (not os.path.exists(abslfile) or \
446 expecthash != lfutil.hashfile(abslfile)):
446 expecthash != lfutil.hashfile(abslfile)):
447 if not lfutil.copyfromcache(repo, expecthash, lfile):
447 if not lfutil.copyfromcache(repo, expecthash, lfile):
448 return None # don't try to set the mode or update the dirstate
448 return None # don't try to set the mode or update the dirstate
449 ret = 1
449 ret = 1
450 mode = os.stat(absstandin).st_mode
450 mode = os.stat(absstandin).st_mode
451 if mode != os.stat(abslfile).st_mode:
451 if mode != os.stat(abslfile).st_mode:
452 os.chmod(abslfile, mode)
452 os.chmod(abslfile, mode)
453 ret = 1
453 ret = 1
454 else:
454 else:
455 if os.path.exists(abslfile):
455 if os.path.exists(abslfile):
456 os.unlink(abslfile)
456 os.unlink(abslfile)
457 ret = -1
457 ret = -1
458 state = repo.dirstate[lfutil.standin(lfile)]
458 state = repo.dirstate[lfutil.standin(lfile)]
459 if state == 'n':
459 if state == 'n':
460 lfdirstate.normal(lfile)
460 lfdirstate.normal(lfile)
461 elif state == 'r':
461 elif state == 'r':
462 lfdirstate.remove(lfile)
462 lfdirstate.remove(lfile)
463 elif state == 'a':
463 elif state == 'a':
464 lfdirstate.add(lfile)
464 lfdirstate.add(lfile)
465 elif state == '?':
465 elif state == '?':
466 lfdirstate.drop(lfile)
466 lfdirstate.drop(lfile)
467 return ret
467 return ret
468
468
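
updatelfiles folds the return codes documented in _updatelfile's docstring into its updated/removed counters using the old `cond and a or b` idiom, which for these integer codes behaves like max(i, 0) and max(-i, 0); a missing-cache None also falls through to zero in the original Python 2 code. A small self-contained sketch with made-up results:

    results = [1, 0, -1, 1, -1]   # hypothetical _updatelfile return values
    updated = removed = 0
    for i in results:
        updated += i > 0 and i or 0   # count files that were modified
        removed -= i < 0 and i or 0   # count files that were removed
    print('%d largefiles updated, %d removed' % (updated, removed))  # 2, 2
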
469 # -- hg commands declarations ------------------------------------------------
469 # -- hg commands declarations ------------------------------------------------
470
470
471
471
472 cmdtable = {
472 cmdtable = {
473 'lfconvert': (lfconvert,
473 'lfconvert': (lfconvert,
474 [('s', 'size', '',
474 [('s', 'size', '',
475 _('minimum size (MB) for files to be converted '
475 _('minimum size (MB) for files to be converted '
476 'as largefiles'),
476 'as largefiles'),
477 'SIZE'),
477 'SIZE'),
478 ('', 'tonormal', False,
478 ('', 'tonormal', False,
479 _('convert from a largefiles repo to a normal repo')),
479 _('convert from a largefiles repo to a normal repo')),
480 ],
480 ],
481 _('hg lfconvert SOURCE DEST [FILE ...]')),
481 _('hg lfconvert SOURCE DEST [FILE ...]')),
482 }
482 }
@@ -1,824 +1,826 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, match as match_, node, \
14 from mercurial import hg, commands, util, cmdutil, match as match_, node, \
15 archival, error, merge
15 archival, error, merge
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19 import lfutil
19 import lfutil
20
20
21 try:
21 try:
22 from mercurial import scmutil
22 from mercurial import scmutil
23 except ImportError:
23 except ImportError:
24 pass
24 pass
25
25
26 import lfutil
26 import lfutil
27 import lfcommands
27 import lfcommands
28
28
29 def installnormalfilesmatchfn(manifest):
29 def installnormalfilesmatchfn(manifest):
30 '''overrides scmutil.match so that the matcher it returns will ignore all
30 '''overrides scmutil.match so that the matcher it returns will ignore all
31 largefiles'''
31 largefiles'''
32 oldmatch = None # for the closure
32 oldmatch = None # for the closure
33 def override_match(repo, pats=[], opts={}, globbed=False,
33 def override_match(repo, pats=[], opts={}, globbed=False,
34 default='relpath'):
34 default='relpath'):
35 match = oldmatch(repo, pats, opts, globbed, default)
35 match = oldmatch(repo, pats, opts, globbed, default)
36 m = copy.copy(match)
36 m = copy.copy(match)
37 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
37 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
38 manifest)
38 manifest)
39 m._files = filter(notlfile, m._files)
39 m._files = filter(notlfile, m._files)
40 m._fmap = set(m._files)
40 m._fmap = set(m._files)
41 orig_matchfn = m.matchfn
41 orig_matchfn = m.matchfn
42 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
42 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
43 return m
43 return m
44 oldmatch = installmatchfn(override_match)
44 oldmatch = installmatchfn(override_match)
45
45
46 def installmatchfn(f):
46 def installmatchfn(f):
47 oldmatch = scmutil.match
47 oldmatch = scmutil.match
48 setattr(f, 'oldmatch', oldmatch)
48 setattr(f, 'oldmatch', oldmatch)
49 scmutil.match = f
49 scmutil.match = f
50 return oldmatch
50 return oldmatch
51
51
52 def restorematchfn():
52 def restorematchfn():
53 '''restores scmutil.match to what it was before installnormalfilesmatchfn
53 '''restores scmutil.match to what it was before installnormalfilesmatchfn
54 was called. no-op if scmutil.match is its original function.
54 was called. no-op if scmutil.match is its original function.
55
55
56 Note that n calls to installnormalfilesmatchfn will require n calls to
56 Note that n calls to installnormalfilesmatchfn will require n calls to
57 restore matchfn to reverse'''
57 restore matchfn to reverse'''
58 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
58 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
59
59
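
installmatchfn and restorematchfn implement a small monkey-patching protocol: the replacement stores the function it displaced in an oldmatch attribute, so restoring is just reading that attribute back, and n nested installs need n restores. A self-contained sketch of the same pattern against a stand-in module; fakescmutil and override_match are illustrative names, not the real scmutil.

    import types

    fakescmutil = types.ModuleType('fakescmutil')        # stands in for scmutil
    fakescmutil.match = lambda pats: 'match(%r)' % pats  # stands in for scmutil.match

    def installmatchfn(f):
        oldmatch = fakescmutil.match
        setattr(f, 'oldmatch', oldmatch)
        fakescmutil.match = f
        return oldmatch

    def restorematchfn():
        fakescmutil.match = getattr(fakescmutil.match, 'oldmatch', fakescmutil.match)

    def override_match(pats):
        return 'filtered ' + override_match.oldmatch(pats)

    installmatchfn(override_match)
    print(fakescmutil.match(['foo']))   # filtered match(['foo'])
    restorematchfn()
    print(fakescmutil.match(['foo']))   # match(['foo'])
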
60 # -- Wrappers: modify existing commands --------------------------------
60 # -- Wrappers: modify existing commands --------------------------------
61
61
62 # Add works by going through the files that the user wanted to add and
62 # Add works by going through the files that the user wanted to add and
63 # checking if they should be added as largefiles. Then it makes a new
63 # checking if they should be added as largefiles. Then it makes a new
64 # matcher which matches only the normal files and runs the original
64 # matcher which matches only the normal files and runs the original
65 # version of add.
65 # version of add.
66 def override_add(orig, ui, repo, *pats, **opts):
66 def override_add(orig, ui, repo, *pats, **opts):
67 large = opts.pop('large', None)
67 large = opts.pop('large', None)
68 lfsize = lfutil.getminsize(
68 lfsize = lfutil.getminsize(
69 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
69 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
70
70
71 lfmatcher = None
71 lfmatcher = None
72 if os.path.exists(repo.wjoin(lfutil.shortname)):
72 if os.path.exists(repo.wjoin(lfutil.shortname)):
73 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
73 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
74 if lfpats:
74 if lfpats:
75 lfmatcher = match_.match(repo.root, '', list(lfpats))
75 lfmatcher = match_.match(repo.root, '', list(lfpats))
76
76
77 lfnames = []
77 lfnames = []
78 m = scmutil.match(repo[None], pats, opts)
78 m = scmutil.match(repo[None], pats, opts)
79 m.bad = lambda x, y: None
79 m.bad = lambda x, y: None
80 wctx = repo[None]
80 wctx = repo[None]
81 for f in repo.walk(m):
81 for f in repo.walk(m):
82 exact = m.exact(f)
82 exact = m.exact(f)
83 lfile = lfutil.standin(f) in wctx
83 lfile = lfutil.standin(f) in wctx
84 nfile = f in wctx
84 nfile = f in wctx
85 exists = lfile or nfile
85 exists = lfile or nfile
86
86
87 # Don't warn the user when they attempt to add a normal tracked file.
87 # Don't warn the user when they attempt to add a normal tracked file.
88 # The normal add code will do that for us.
88 # The normal add code will do that for us.
89 if exact and exists:
89 if exact and exists:
90 if lfile:
90 if lfile:
91 ui.warn(_('%s already a largefile\n') % f)
91 ui.warn(_('%s already a largefile\n') % f)
92 continue
92 continue
93
93
94 if exact or not exists:
94 if exact or not exists:
95 if large or (lfsize and os.path.getsize(repo.wjoin(f)) >= \
95 if large or (lfsize and os.path.getsize(repo.wjoin(f)) >= \
96 lfsize * 1024 * 1024) or (lfmatcher and lfmatcher(f)):
96 lfsize * 1024 * 1024) or (lfmatcher and lfmatcher(f)):
97 lfnames.append(f)
97 lfnames.append(f)
98 if ui.verbose or not exact:
98 if ui.verbose or not exact:
99 ui.status(_('adding %s as a largefile\n') % m.rel(f))
99 ui.status(_('adding %s as a largefile\n') % m.rel(f))
100
100
101 bad = []
101 bad = []
102 standins = []
102 standins = []
103
103
104 # Need to lock, otherwise there could be a race condition between
104 # Need to lock, otherwise there could be a race condition between
105 # when standins are created and added to the repo.
105 # when standins are created and added to the repo.
106 wlock = repo.wlock()
106 wlock = repo.wlock()
107 try:
107 try:
108 if not opts.get('dry_run'):
108 if not opts.get('dry_run'):
109 lfdirstate = lfutil.openlfdirstate(ui, repo)
109 lfdirstate = lfutil.openlfdirstate(ui, repo)
110 for f in lfnames:
110 for f in lfnames:
111 standinname = lfutil.standin(f)
111 standinname = lfutil.standin(f)
112 lfutil.writestandin(repo, standinname, hash='',
112 lfutil.writestandin(repo, standinname, hash='',
113 executable=lfutil.getexecutable(repo.wjoin(f)))
113 executable=lfutil.getexecutable(repo.wjoin(f)))
114 standins.append(standinname)
114 standins.append(standinname)
115 if lfdirstate[f] == 'r':
115 if lfdirstate[f] == 'r':
116 lfdirstate.normallookup(f)
116 lfdirstate.normallookup(f)
117 else:
117 else:
118 lfdirstate.add(f)
118 lfdirstate.add(f)
119 lfdirstate.write()
119 lfdirstate.write()
120 bad += [lfutil.splitstandin(f) for f in lfutil.repo_add(repo,
120 bad += [lfutil.splitstandin(f) for f in lfutil.repo_add(repo,
121 standins) if f in m.files()]
121 standins) if f in m.files()]
122 finally:
122 finally:
123 wlock.release()
123 wlock.release()
124
124
125 installnormalfilesmatchfn(repo[None].manifest())
125 installnormalfilesmatchfn(repo[None].manifest())
126 result = orig(ui, repo, *pats, **opts)
126 result = orig(ui, repo, *pats, **opts)
127 restorematchfn()
127 restorematchfn()
128
128
129 return (result == 1 or bad) and 1 or 0
129 return (result == 1 or bad) and 1 or 0
130
130
131 def override_remove(orig, ui, repo, *pats, **opts):
131 def override_remove(orig, ui, repo, *pats, **opts):
132 manifest = repo[None].manifest()
132 manifest = repo[None].manifest()
133 installnormalfilesmatchfn(manifest)
133 installnormalfilesmatchfn(manifest)
134 orig(ui, repo, *pats, **opts)
134 orig(ui, repo, *pats, **opts)
135 restorematchfn()
135 restorematchfn()
136
136
137 after, force = opts.get('after'), opts.get('force')
137 after, force = opts.get('after'), opts.get('force')
138 if not pats and not after:
138 if not pats and not after:
139 raise util.Abort(_('no files specified'))
139 raise util.Abort(_('no files specified'))
140 m = scmutil.match(repo[None], pats, opts)
140 m = scmutil.match(repo[None], pats, opts)
141 try:
141 try:
142 repo.lfstatus = True
142 repo.lfstatus = True
143 s = repo.status(match=m, clean=True)
143 s = repo.status(match=m, clean=True)
144 finally:
144 finally:
145 repo.lfstatus = False
145 repo.lfstatus = False
146 modified, added, deleted, clean = [[f for f in list if lfutil.standin(f) \
146 modified, added, deleted, clean = [[f for f in list if lfutil.standin(f) \
147 in manifest] for list in [s[0], s[1], s[3], s[6]]]
147 in manifest] for list in [s[0], s[1], s[3], s[6]]]
148
148
149 def warn(files, reason):
149 def warn(files, reason):
150 for f in files:
150 for f in files:
151 ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
151 ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
152 % (m.rel(f), reason))
152 % (m.rel(f), reason))
153
153
154 if force:
154 if force:
155 remove, forget = modified + deleted + clean, added
155 remove, forget = modified + deleted + clean, added
156 elif after:
156 elif after:
157 remove, forget = deleted, []
157 remove, forget = deleted, []
158 warn(modified + added + clean, _('still exists'))
158 warn(modified + added + clean, _('still exists'))
159 else:
159 else:
160 remove, forget = deleted + clean, []
160 remove, forget = deleted + clean, []
161 warn(modified, _('is modified'))
161 warn(modified, _('is modified'))
162 warn(added, _('has been marked for add'))
162 warn(added, _('has been marked for add'))
163
163
164 for f in sorted(remove + forget):
164 for f in sorted(remove + forget):
165 if ui.verbose or not m.exact(f):
165 if ui.verbose or not m.exact(f):
166 ui.status(_('removing %s\n') % m.rel(f))
166 ui.status(_('removing %s\n') % m.rel(f))
167
167
168 # Need to lock because standin files are deleted then removed from the
168 # Need to lock because standin files are deleted then removed from the
169 # repository and we could race in between.
169 # repository and we could race in between.
170 wlock = repo.wlock()
170 wlock = repo.wlock()
171 try:
171 try:
172 lfdirstate = lfutil.openlfdirstate(ui, repo)
172 lfdirstate = lfutil.openlfdirstate(ui, repo)
173 for f in remove:
173 for f in remove:
174 if not after:
174 if not after:
175 os.unlink(repo.wjoin(f))
175 os.unlink(repo.wjoin(f))
176 currentdir = os.path.split(f)[0]
176 currentdir = os.path.split(f)[0]
177 while currentdir and not os.listdir(repo.wjoin(currentdir)):
177 while currentdir and not os.listdir(repo.wjoin(currentdir)):
178 os.rmdir(repo.wjoin(currentdir))
178 os.rmdir(repo.wjoin(currentdir))
179 currentdir = os.path.split(currentdir)[0]
179 currentdir = os.path.split(currentdir)[0]
180 lfdirstate.remove(f)
180 lfdirstate.remove(f)
181 lfdirstate.write()
181 lfdirstate.write()
182
182
183 forget = [lfutil.standin(f) for f in forget]
183 forget = [lfutil.standin(f) for f in forget]
184 remove = [lfutil.standin(f) for f in remove]
184 remove = [lfutil.standin(f) for f in remove]
185 lfutil.repo_forget(repo, forget)
185 lfutil.repo_forget(repo, forget)
186 lfutil.repo_remove(repo, remove, unlink=True)
186 lfutil.repo_remove(repo, remove, unlink=True)
187 finally:
187 finally:
188 wlock.release()
188 wlock.release()
189
189
190 def override_status(orig, ui, repo, *pats, **opts):
190 def override_status(orig, ui, repo, *pats, **opts):
191 try:
191 try:
192 repo.lfstatus = True
192 repo.lfstatus = True
193 return orig(ui, repo, *pats, **opts)
193 return orig(ui, repo, *pats, **opts)
194 finally:
194 finally:
195 repo.lfstatus = False
195 repo.lfstatus = False
196
196
197 def override_log(orig, ui, repo, *pats, **opts):
197 def override_log(orig, ui, repo, *pats, **opts):
198 try:
198 try:
199 repo.lfstatus = True
199 repo.lfstatus = True
200 orig(ui, repo, *pats, **opts)
200 orig(ui, repo, *pats, **opts)
201 finally:
201 finally:
202 repo.lfstatus = False
202 repo.lfstatus = False
203
203
204 def override_verify(orig, ui, repo, *pats, **opts):
204 def override_verify(orig, ui, repo, *pats, **opts):
205 large = opts.pop('large', False)
205 large = opts.pop('large', False)
206 all = opts.pop('lfa', False)
206 all = opts.pop('lfa', False)
207 contents = opts.pop('lfc', False)
207 contents = opts.pop('lfc', False)
208
208
209 result = orig(ui, repo, *pats, **opts)
209 result = orig(ui, repo, *pats, **opts)
210 if large:
210 if large:
211 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
211 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
212 return result
212 return result
213
213
214 # Override needs to refresh standins so that update's normal merge
214 # Override needs to refresh standins so that update's normal merge
215 # will go through properly. Then the other update hook (overriding repo.update)
215 # will go through properly. Then the other update hook (overriding repo.update)
216 # will get the new files. Filemerge is also overridden so that the merge
216 # will get the new files. Filemerge is also overridden so that the merge
217 # will merge standins correctly.
217 # will merge standins correctly.
218 def override_update(orig, ui, repo, *pats, **opts):
218 def override_update(orig, ui, repo, *pats, **opts):
219 lfdirstate = lfutil.openlfdirstate(ui, repo)
219 lfdirstate = lfutil.openlfdirstate(ui, repo)
220 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
220 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
221 False, False)
221 False, False)
222 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
222 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
223
223
224 # Need to lock between the standins getting updated and their
224 # Need to lock between the standins getting updated and their
225 # largefiles getting updated
225 # largefiles getting updated
226 wlock = repo.wlock()
226 wlock = repo.wlock()
227 try:
227 try:
228 if opts['check']:
228 if opts['check']:
229 mod = len(modified) > 0
229 mod = len(modified) > 0
230 for lfile in unsure:
230 for lfile in unsure:
231 standin = lfutil.standin(lfile)
231 standin = lfutil.standin(lfile)
232 if repo['.'][standin].data().strip() != \
232 if repo['.'][standin].data().strip() != \
233 lfutil.hashfile(repo.wjoin(lfile)):
233 lfutil.hashfile(repo.wjoin(lfile)):
234 mod = True
234 mod = True
235 else:
235 else:
236 lfdirstate.normal(lfile)
236 lfdirstate.normal(lfile)
237 lfdirstate.write()
237 lfdirstate.write()
238 if mod:
238 if mod:
239 raise util.Abort(_('uncommitted local changes'))
239 raise util.Abort(_('uncommitted local changes'))
240 # XXX handle removed differently
240 # XXX handle removed differently
241 if not opts['clean']:
241 if not opts['clean']:
242 for lfile in unsure + modified + added:
242 for lfile in unsure + modified + added:
243 lfutil.updatestandin(repo, lfutil.standin(lfile))
243 lfutil.updatestandin(repo, lfutil.standin(lfile))
244 finally:
244 finally:
245 wlock.release()
245 wlock.release()
246 return orig(ui, repo, *pats, **opts)
246 return orig(ui, repo, *pats, **opts)
247
247
248 # Override filemerge to prompt the user about how they wish to merge
248 # Override filemerge to prompt the user about how they wish to merge
249 # largefiles. This will handle identical edits, and copy/rename +
249 # largefiles. This will handle identical edits, and copy/rename +
250 # edit without prompting the user.
250 # edit without prompting the user.
251 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
251 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
252 # Use better variable names here. Because this is a wrapper we cannot
252 # Use better variable names here. Because this is a wrapper we cannot
253 # change the variable names in the function declaration.
253 # change the variable names in the function declaration.
254 fcdest, fcother, fcancestor = fcd, fco, fca
254 fcdest, fcother, fcancestor = fcd, fco, fca
255 if not lfutil.isstandin(orig):
255 if not lfutil.isstandin(orig):
256 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
256 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
257 else:
257 else:
258 if not fcother.cmp(fcdest): # files identical?
258 if not fcother.cmp(fcdest): # files identical?
259 return None
259 return None
260
260
261 # backwards, use working dir parent as ancestor
261 # backwards, use working dir parent as ancestor
262 if fcancestor == fcother:
262 if fcancestor == fcother:
263 fcancestor = fcdest.parents()[0]
263 fcancestor = fcdest.parents()[0]
264
264
265 if orig != fcother.path():
265 if orig != fcother.path():
266 repo.ui.status(_('merging %s and %s to %s\n')
266 repo.ui.status(_('merging %s and %s to %s\n')
267 % (lfutil.splitstandin(orig),
267 % (lfutil.splitstandin(orig),
268 lfutil.splitstandin(fcother.path()),
268 lfutil.splitstandin(fcother.path()),
269 lfutil.splitstandin(fcdest.path())))
269 lfutil.splitstandin(fcdest.path())))
270 else:
270 else:
271 repo.ui.status(_('merging %s\n')
271 repo.ui.status(_('merging %s\n')
272 % lfutil.splitstandin(fcdest.path()))
272 % lfutil.splitstandin(fcdest.path()))
273
273
274 if fcancestor.path() != fcother.path() and fcother.data() == \
274 if fcancestor.path() != fcother.path() and fcother.data() == \
275 fcancestor.data():
275 fcancestor.data():
276 return 0
276 return 0
277 if fcancestor.path() != fcdest.path() and fcdest.data() == \
277 if fcancestor.path() != fcdest.path() and fcdest.data() == \
278 fcancestor.data():
278 fcancestor.data():
279 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
279 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
280 return 0
280 return 0
281
281
282 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
282 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
283 'keep (l)ocal or take (o)ther?') %
283 'keep (l)ocal or take (o)ther?') %
284 lfutil.splitstandin(orig),
284 lfutil.splitstandin(orig),
285 (_('&Local'), _('&Other')), 0) == 0:
285 (_('&Local'), _('&Other')), 0) == 0:
286 return 0
286 return 0
287 else:
287 else:
288 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
288 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
289 return 0
289 return 0
290
290
291 # Copy first changes the matchers to match standins instead of
291 # Copy first changes the matchers to match standins instead of
292 # largefiles. Then it overrides util.copyfile; in that function it
292 # largefiles. Then it overrides util.copyfile; in that function it
293 # checks if the destination largefile already exists. It also keeps a
293 # checks if the destination largefile already exists. It also keeps a
294 # list of copied files so that the largefiles can be copied and the
294 # list of copied files so that the largefiles can be copied and the
295 # dirstate updated.
295 # dirstate updated.
296 def override_copy(orig, ui, repo, pats, opts, rename=False):
296 def override_copy(orig, ui, repo, pats, opts, rename=False):
297 # doesn't remove largefile on rename
297 # doesn't remove largefile on rename
298 if len(pats) < 2:
298 if len(pats) < 2:
299 # this isn't legal, let the original function deal with it
299 # this isn't legal, let the original function deal with it
300 return orig(ui, repo, pats, opts, rename)
300 return orig(ui, repo, pats, opts, rename)
301
301
302 def makestandin(relpath):
302 def makestandin(relpath):
303 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
303 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
304 return os.path.join(os.path.relpath('.', repo.getcwd()),
304 return os.path.join(os.path.relpath('.', repo.getcwd()),
305 lfutil.standin(path))
305 lfutil.standin(path))
306
306
307 fullpats = scmutil.expandpats(pats)
307 fullpats = scmutil.expandpats(pats)
308 dest = fullpats[-1]
308 dest = fullpats[-1]
309
309
310 if os.path.isdir(dest):
310 if os.path.isdir(dest):
311 if not os.path.isdir(makestandin(dest)):
311 if not os.path.isdir(makestandin(dest)):
312 os.makedirs(makestandin(dest))
312 os.makedirs(makestandin(dest))
313 # This could copy both largefiles and normal files in one command,
313 # This could copy both lfiles and normal files in one command,
314 # but we don't want to do that first replace their matcher to only
314 # but we don't want to do that. First replace their matcher to
315 # match normal files and run it then replace it to just match
315 # only match normal files and run it, then replace it to just
316 # lfiles and run it again
316 # match largefiles and run it again.
317 nonormalfiles = False
317 nonormalfiles = False
318 nolfiles = False
318 nolfiles = False
319 try:
319 try:
320 installnormalfilesmatchfn(repo[None].manifest())
320 installnormalfilesmatchfn(repo[None].manifest())
321 result = orig(ui, repo, pats, opts, rename)
321 result = orig(ui, repo, pats, opts, rename)
322 except util.Abort, e:
322 except util.Abort, e:
323 if str(e) != 'no files to copy':
323 if str(e) != 'no files to copy':
324 raise e
324 raise e
325 else:
325 else:
326 nonormalfiles = True
326 nonormalfiles = True
327 result = 0
327 result = 0
328 finally:
328 finally:
329 restorematchfn()
329 restorematchfn()
330
330
331 # The first rename can cause our current working directory to be removed.
331 # The first rename can cause our current working directory to be removed.
332 # In that case there is nothing left to copy/rename so just quit.
332 # In that case there is nothing left to copy/rename so just quit.
333 try:
333 try:
334 repo.getcwd()
334 repo.getcwd()
335 except OSError:
335 except OSError:
336 return result
336 return result
337
337
338 try:
338 try:
339 # When we call orig below it creates the standins but we don't add them
339 # When we call orig below it creates the standins but we don't add them
340 # to the dirstate until later, so lock during that time.
340 # to the dirstate until later, so lock during that time.
341 wlock = repo.wlock()
341 wlock = repo.wlock()
342
342
343 manifest = repo[None].manifest()
343 manifest = repo[None].manifest()
344 oldmatch = None # for the closure
344 oldmatch = None # for the closure
345 def override_match(repo, pats=[], opts={}, globbed=False,
345 def override_match(repo, pats=[], opts={}, globbed=False,
346 default='relpath'):
346 default='relpath'):
347 newpats = []
347 newpats = []
348 # The patterns were previously mangled to add the standin
348 # The patterns were previously mangled to add the standin
349 # directory; we need to remove that now
349 # directory; we need to remove that now
350 for pat in pats:
350 for pat in pats:
351 if match_.patkind(pat) is None and lfutil.shortname in pat:
351 if match_.patkind(pat) is None and lfutil.shortname in pat:
352 newpats.append(pat.replace(lfutil.shortname, ''))
352 newpats.append(pat.replace(lfutil.shortname, ''))
353 else:
353 else:
354 newpats.append(pat)
354 newpats.append(pat)
355 match = oldmatch(repo, newpats, opts, globbed, default)
355 match = oldmatch(repo, newpats, opts, globbed, default)
356 m = copy.copy(match)
356 m = copy.copy(match)
357 lfile = lambda f: lfutil.standin(f) in manifest
357 lfile = lambda f: lfutil.standin(f) in manifest
358 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
358 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
359 m._fmap = set(m._files)
359 m._fmap = set(m._files)
360 orig_matchfn = m.matchfn
360 orig_matchfn = m.matchfn
361 m.matchfn = lambda f: lfutil.isstandin(f) and \
361 m.matchfn = lambda f: lfutil.isstandin(f) and \
362 lfile(lfutil.splitstandin(f)) and \
362 lfile(lfutil.splitstandin(f)) and \
363 orig_matchfn(lfutil.splitstandin(f)) or None
363 orig_matchfn(lfutil.splitstandin(f)) or None
364 return m
364 return m
365 oldmatch = installmatchfn(override_match)
365 oldmatch = installmatchfn(override_match)
366 listpats = []
366 listpats = []
367 for pat in pats:
367 for pat in pats:
368 if match_.patkind(pat) is not None:
368 if match_.patkind(pat) is not None:
369 listpats.append(pat)
369 listpats.append(pat)
370 else:
370 else:
371 listpats.append(makestandin(pat))
371 listpats.append(makestandin(pat))
372
372
373 try:
373 try:
374 origcopyfile = util.copyfile
374 origcopyfile = util.copyfile
375 copiedfiles = []
375 copiedfiles = []
376 def override_copyfile(src, dest):
376 def override_copyfile(src, dest):
377 if lfutil.shortname in src and lfutil.shortname in dest:
377 if lfutil.shortname in src and lfutil.shortname in dest:
378 destlfile = dest.replace(lfutil.shortname, '')
378 destlfile = dest.replace(lfutil.shortname, '')
379 if not opts['force'] and os.path.exists(destlfile):
379 if not opts['force'] and os.path.exists(destlfile):
380 raise IOError('',
380 raise IOError('',
381 _('destination largefile already exists'))
381 _('destination largefile already exists'))
382 copiedfiles.append((src, dest))
382 copiedfiles.append((src, dest))
383 origcopyfile(src, dest)
383 origcopyfile(src, dest)
384
384
385 util.copyfile = override_copyfile
385 util.copyfile = override_copyfile
386 result += orig(ui, repo, listpats, opts, rename)
386 result += orig(ui, repo, listpats, opts, rename)
387 finally:
387 finally:
388 util.copyfile = origcopyfile
388 util.copyfile = origcopyfile
389
389
390 lfdirstate = lfutil.openlfdirstate(ui, repo)
390 lfdirstate = lfutil.openlfdirstate(ui, repo)
391 for (src, dest) in copiedfiles:
391 for (src, dest) in copiedfiles:
392 if lfutil.shortname in src and lfutil.shortname in dest:
392 if lfutil.shortname in src and lfutil.shortname in dest:
393 srclfile = src.replace(lfutil.shortname, '')
393 srclfile = src.replace(lfutil.shortname, '')
394 destlfile = dest.replace(lfutil.shortname, '')
394 destlfile = dest.replace(lfutil.shortname, '')
395 destlfiledir = os.path.dirname(destlfile) or '.'
395 destlfiledir = os.path.dirname(destlfile) or '.'
396 if not os.path.isdir(destlfiledir):
396 if not os.path.isdir(destlfiledir):
397 os.makedirs(destlfiledir)
397 os.makedirs(destlfiledir)
398 if rename:
398 if rename:
399 os.rename(srclfile, destlfile)
399 os.rename(srclfile, destlfile)
400 lfdirstate.remove(os.path.relpath(srclfile,
400 lfdirstate.remove(os.path.relpath(srclfile,
401 repo.root))
401 repo.root))
402 else:
402 else:
403 util.copyfile(srclfile, destlfile)
403 util.copyfile(srclfile, destlfile)
404 lfdirstate.add(os.path.relpath(destlfile,
404 lfdirstate.add(os.path.relpath(destlfile,
405 repo.root))
405 repo.root))
406 lfdirstate.write()
406 lfdirstate.write()
407 except util.Abort, e:
407 except util.Abort, e:
408 if str(e) != 'no files to copy':
408 if str(e) != 'no files to copy':
409 raise e
409 raise e
410 else:
410 else:
411 nolfiles = True
411 nolfiles = True
412 finally:
412 finally:
413 restorematchfn()
413 restorematchfn()
414 wlock.release()
414 wlock.release()
415
415
416 if nolfiles and nonormalfiles:
416 if nolfiles and nonormalfiles:
417 raise util.Abort(_('no files to copy'))
417 raise util.Abort(_('no files to copy'))
418
418
419 return result
419 return result
420
420
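# A simplified, standalone sketch of the standin naming convention the
# copy/rename override above relies on: a largefile is tracked through a
# small "standin" file under a short prefix directory (lfutil.shortname,
# '.hglf' in this extension), so rewriting patterns is just adding or
# stripping that prefix. The helpers below are illustrative stand-ins for
# lfutil.standin/splitstandin/isstandin, not the extension's exact code.
import posixpath

SHORTNAME = '.hglf'    # assumed value of lfutil.shortname

def standin(filename):
    # repo-relative largefile path -> its standin path
    return posixpath.join(SHORTNAME, filename)

def isstandin(filename):
    return filename.startswith(SHORTNAME + '/')

def splitstandin(filename):
    # standin path -> largefile path (None if it is not a standin)
    if isstandin(filename):
        return filename[len(SHORTNAME) + 1:]
    return None

assert standin('data/big.bin') == '.hglf/data/big.bin'
assert splitstandin('.hglf/data/big.bin') == 'data/big.bin'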
421 # When the user calls revert, we have to be careful to not revert any changes
421 # When the user calls revert, we have to be careful to not revert any
422 # to other lfiles accidentally. This means we have to keep track of the lfiles
422 # changes to other largefiles accidentally. This means we have to keep
423 # that are being reverted so we only pull down the necessary lfiles.
423 # track of the largefiles that are being reverted so we only pull down
424 # the necessary largefiles.
424 #
425 #
425 # Standins are only updated (to match the hash of lfiles) before commits.
426 # Standins are only updated (to match the hash of largefiles) before
426 # Update the standins then run the original revert (changing the matcher to hit
427 # commits. Update the standins then run the original revert, changing
427 # standins instead of lfiles). Based on the resulting standins update the
428 # the matcher to hit standins instead of largefiles. Based on the
428 # lfiles. Then return the standins to their proper state
429 # resulting standins update the largefiles. Then return the standins
430 # to their proper state
429 def override_revert(orig, ui, repo, *pats, **opts):
431 def override_revert(orig, ui, repo, *pats, **opts):
430 # Because we put the standins in a bad state (by updating them) and then
432 # Because we put the standins in a bad state (by updating them)
431 # return them to a correct state we need to lock to prevent others from
433 # and then return them to a correct state we need to lock to
432 # changing them in their incorrect state.
434 # prevent others from changing them in their incorrect state.
433 wlock = repo.wlock()
435 wlock = repo.wlock()
434 try:
436 try:
435 lfdirstate = lfutil.openlfdirstate(ui, repo)
437 lfdirstate = lfutil.openlfdirstate(ui, repo)
436 (modified, added, removed, missing, unknown, ignored, clean) = \
438 (modified, added, removed, missing, unknown, ignored, clean) = \
437 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
439 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
438 for lfile in modified:
440 for lfile in modified:
439 lfutil.updatestandin(repo, lfutil.standin(lfile))
441 lfutil.updatestandin(repo, lfutil.standin(lfile))
440
442
441 try:
443 try:
442 ctx = repo[opts.get('rev')]
444 ctx = repo[opts.get('rev')]
443 oldmatch = None # for the closure
445 oldmatch = None # for the closure
444 def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
446 def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
445 default='relpath'):
447 default='relpath'):
446 if util.safehasattr(ctxorrepo, 'match'):
448 if util.safehasattr(ctxorrepo, 'match'):
447 ctx0 = ctxorrepo
449 ctx0 = ctxorrepo
448 else:
450 else:
449 ctx0 = ctxorrepo[None]
451 ctx0 = ctxorrepo[None]
450 match = oldmatch(ctxorrepo, pats, opts, globbed, default)
452 match = oldmatch(ctxorrepo, pats, opts, globbed, default)
451 m = copy.copy(match)
453 m = copy.copy(match)
452 def tostandin(f):
454 def tostandin(f):
453 if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx:
455 if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx:
454 return lfutil.standin(f)
456 return lfutil.standin(f)
455 elif lfutil.standin(f) in repo[None]:
457 elif lfutil.standin(f) in repo[None]:
456 return None
458 return None
457 return f
459 return f
458 m._files = [tostandin(f) for f in m._files]
460 m._files = [tostandin(f) for f in m._files]
459 m._files = [f for f in m._files if f is not None]
461 m._files = [f for f in m._files if f is not None]
460 m._fmap = set(m._files)
462 m._fmap = set(m._files)
461 orig_matchfn = m.matchfn
463 orig_matchfn = m.matchfn
462 def matchfn(f):
464 def matchfn(f):
463 if lfutil.isstandin(f):
465 if lfutil.isstandin(f):
464 # We need to keep track of what lfiles are being
466 # We need to keep track of what largefiles are being
465 # matched so we know which ones to update later
467 # matched so we know which ones to update later --
466 # (otherwise we revert changes to other lfiles
468 # otherwise we accidentally revert changes to other
467 # accidentally). This is repo specific, so duckpunch
469 # largefiles. This is repo-specific, so duckpunch the
468 # the repo object to keep the list of lfiles for us
470 # repo object to keep the list of largefiles for us
469 # later.
471 # later.
470 if orig_matchfn(lfutil.splitstandin(f)) and \
472 if orig_matchfn(lfutil.splitstandin(f)) and \
471 (f in repo[None] or f in ctx):
473 (f in repo[None] or f in ctx):
472 lfileslist = getattr(repo, '_lfilestoupdate', [])
474 lfileslist = getattr(repo, '_lfilestoupdate', [])
473 lfileslist.append(lfutil.splitstandin(f))
475 lfileslist.append(lfutil.splitstandin(f))
474 repo._lfilestoupdate = lfileslist
476 repo._lfilestoupdate = lfileslist
475 return True
477 return True
476 else:
478 else:
477 return False
479 return False
478 return orig_matchfn(f)
480 return orig_matchfn(f)
479 m.matchfn = matchfn
481 m.matchfn = matchfn
480 return m
482 return m
481 oldmatch = installmatchfn(override_match)
483 oldmatch = installmatchfn(override_match)
482 scmutil.match
484 scmutil.match
483 matches = override_match(repo[None], pats, opts)
485 matches = override_match(repo[None], pats, opts)
484 orig(ui, repo, *pats, **opts)
486 orig(ui, repo, *pats, **opts)
485 finally:
487 finally:
486 restorematchfn()
488 restorematchfn()
487 lfileslist = getattr(repo, '_lfilestoupdate', [])
489 lfileslist = getattr(repo, '_lfilestoupdate', [])
488 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
490 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
489 printmessage=False)
491 printmessage=False)
490 # Empty out the lfiles list so we start fresh next time
492
493 # empty out the largefiles list so we start fresh next time
491 repo._lfilestoupdate = []
494 repo._lfilestoupdate = []
492 for lfile in modified:
495 for lfile in modified:
493 if lfile in lfileslist:
496 if lfile in lfileslist:
494 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
497 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
495 in repo['.']:
498 in repo['.']:
496 lfutil.writestandin(repo, lfutil.standin(lfile),
499 lfutil.writestandin(repo, lfutil.standin(lfile),
497 repo['.'][lfile].data().strip(),
500 repo['.'][lfile].data().strip(),
498 'x' in repo['.'][lfile].flags())
501 'x' in repo['.'][lfile].flags())
499 lfdirstate = lfutil.openlfdirstate(ui, repo)
502 lfdirstate = lfutil.openlfdirstate(ui, repo)
500 for lfile in added:
503 for lfile in added:
501 standin = lfutil.standin(lfile)
504 standin = lfutil.standin(lfile)
502 if standin not in ctx and (standin in matches or opts.get('all')):
505 if standin not in ctx and (standin in matches or opts.get('all')):
503 if lfile in lfdirstate:
506 if lfile in lfdirstate:
504 lfdirstate.drop(lfile)
507 lfdirstate.drop(lfile)
505 util.unlinkpath(repo.wjoin(standin))
508 util.unlinkpath(repo.wjoin(standin))
506 lfdirstate.write()
509 lfdirstate.write()
507 finally:
510 finally:
508 wlock.release()
511 wlock.release()
509
512
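# A small standalone sketch of the "duckpunch" trick the revert comment
# describes: per-call state is stashed as an attribute on a long-lived
# object from inside a callback, then collected and cleared by the caller.
# All names below are illustrative, not Mercurial API.
def make_matcher(repo, wanted):
    def matchfn(name):
        if name in wanted:
            hits = getattr(repo, '_hits', [])
            hits.append(name)
            repo._hits = hits          # duckpunch: attach state to the repo
            return True
        return False
    return matchfn

class FakeRepo(object):
    pass

repo = FakeRepo()
match = make_matcher(repo, set(['a.bin', 'b.bin']))
for name in ['a.bin', 'c.txt', 'b.bin']:
    match(name)
touched = getattr(repo, '_hits', [])
repo._hits = []                        # reset so the next call starts fresh
assert touched == ['a.bin', 'b.bin']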
510 def hg_update(orig, repo, node):
513 def hg_update(orig, repo, node):
511 result = orig(repo, node)
514 result = orig(repo, node)
512 # XXX check if it worked first
515 # XXX check if it worked first
513 lfcommands.updatelfiles(repo.ui, repo)
516 lfcommands.updatelfiles(repo.ui, repo)
514 return result
517 return result
515
518
516 def hg_clean(orig, repo, node, show_stats=True):
519 def hg_clean(orig, repo, node, show_stats=True):
517 result = orig(repo, node, show_stats)
520 result = orig(repo, node, show_stats)
518 lfcommands.updatelfiles(repo.ui, repo)
521 lfcommands.updatelfiles(repo.ui, repo)
519 return result
522 return result
520
523
521 def hg_merge(orig, repo, node, force=None, remind=True):
524 def hg_merge(orig, repo, node, force=None, remind=True):
522 result = orig(repo, node, force, remind)
525 result = orig(repo, node, force, remind)
523 lfcommands.updatelfiles(repo.ui, repo)
526 lfcommands.updatelfiles(repo.ui, repo)
524 return result
527 return result
525
528
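# hg_update/hg_clean/hg_merge above all share the shape "run orig, then
# refresh largefiles"; wrappers like these are presumably installed with
# extensions.wrapfunction in uisetup (that wiring is not shown in this
# hunk, so treat it as an assumption). A minimal re-implementation of the
# mechanism, for illustration only:
def wrapfunction(container, funcname, wrapper):
    origfn = getattr(container, funcname)
    def wrap(*args, **kwargs):
        # the wrapper always receives the original callable first ("orig")
        return wrapper(origfn, *args, **kwargs)
    setattr(container, funcname, wrap)
    return origfn

class Module(object):
    def update(self, repo, node):
        return 0

def hgupdate(orig, repo, node):
    result = orig(repo, node)
    # ...this is where something like lfcommands.updatelfiles() would run...
    return result

mod = Module()
wrapfunction(mod, 'update', hgupdate)
assert mod.update('repo', 'node') == 0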
526 # When we rebase a repository with remotely changed lfiles, we need
529 # When we rebase a repository with remotely changed largefiles, we need to
527 # to take some extra care so that the lfiles are correctly updated
530 # take some extra care so that the largefiles are correctly updated in the
528 # in the working copy
531 # working copy
529 def override_pull(orig, ui, repo, source=None, **opts):
532 def override_pull(orig, ui, repo, source=None, **opts):
530 if opts.get('rebase', False):
533 if opts.get('rebase', False):
531 repo._isrebasing = True
534 repo._isrebasing = True
532 try:
535 try:
533 if opts.get('update'):
536 if opts.get('update'):
534 del opts['update']
537 del opts['update']
535 ui.debug('--update and --rebase are not compatible, ignoring '
538 ui.debug('--update and --rebase are not compatible, ignoring '
536 'the update flag\n')
539 'the update flag\n')
537 del opts['rebase']
540 del opts['rebase']
538 cmdutil.bailifchanged(repo)
541 cmdutil.bailifchanged(repo)
539 revsprepull = len(repo)
542 revsprepull = len(repo)
540 origpostincoming = commands.postincoming
543 origpostincoming = commands.postincoming
541 def _dummy(*args, **kwargs):
544 def _dummy(*args, **kwargs):
542 pass
545 pass
543 commands.postincoming = _dummy
546 commands.postincoming = _dummy
544 repo.lfpullsource = source
547 repo.lfpullsource = source
545 if not source:
548 if not source:
546 source = 'default'
549 source = 'default'
547 try:
550 try:
548 result = commands.pull(ui, repo, source, **opts)
551 result = commands.pull(ui, repo, source, **opts)
549 finally:
552 finally:
550 commands.postincoming = origpostincoming
553 commands.postincoming = origpostincoming
551 revspostpull = len(repo)
554 revspostpull = len(repo)
552 if revspostpull > revsprepull:
555 if revspostpull > revsprepull:
553 result = result or rebase.rebase(ui, repo)
556 result = result or rebase.rebase(ui, repo)
554 finally:
557 finally:
555 repo._isrebasing = False
558 repo._isrebasing = False
556 else:
559 else:
557 repo.lfpullsource = source
560 repo.lfpullsource = source
558 if not source:
561 if not source:
559 source = 'default'
562 source = 'default'
560 result = orig(ui, repo, source, **opts)
563 result = orig(ui, repo, source, **opts)
561 return result
564 return result
562
565
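# override_pull above temporarily swaps commands.postincoming for a no-op
# (and the copy override does the same with util.copyfile), restoring the
# original in a finally block. The same save/patch/restore pattern as a
# reusable context manager -- a sketch with illustrative names, not
# Mercurial API:
import contextlib

@contextlib.contextmanager
def temporarily(obj, name, replacement):
    saved = getattr(obj, name)
    setattr(obj, name, replacement)
    try:
        yield saved
    finally:
        setattr(obj, name, saved)      # restored even if the body raises

class _Namespace(object):
    pass

commands = _Namespace()
commands.postincoming = lambda *a, **kw: 'real work'

with temporarily(commands, 'postincoming', lambda *a, **kw: None):
    assert commands.postincoming() is None
assert commands.postincoming() == 'real work'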
563 def override_rebase(orig, ui, repo, **opts):
566 def override_rebase(orig, ui, repo, **opts):
564 repo._isrebasing = True
567 repo._isrebasing = True
565 try:
568 try:
566 orig(ui, repo, **opts)
569 orig(ui, repo, **opts)
567 finally:
570 finally:
568 repo._isrebasing = False
571 repo._isrebasing = False
569
572
570 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
573 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
571 prefix=None, mtime=None, subrepos=None):
574 prefix=None, mtime=None, subrepos=None):
572 # No need to lock because we are only reading history and lfile caches
575 # No need to lock because we are only reading history and
573 # neither of which are modified
576 # largefile caches, neither of which are modified.
574
575 lfcommands.cachelfiles(repo.ui, repo, node)
577 lfcommands.cachelfiles(repo.ui, repo, node)
576
578
577 if kind not in archival.archivers:
579 if kind not in archival.archivers:
578 raise util.Abort(_("unknown archive type '%s'") % kind)
580 raise util.Abort(_("unknown archive type '%s'") % kind)
579
581
580 ctx = repo[node]
582 ctx = repo[node]
581
583
582 if kind == 'files':
584 if kind == 'files':
583 if prefix:
585 if prefix:
584 raise util.Abort(
586 raise util.Abort(
585 _('cannot give prefix when archiving to files'))
587 _('cannot give prefix when archiving to files'))
586 else:
588 else:
587 prefix = archival.tidyprefix(dest, kind, prefix)
589 prefix = archival.tidyprefix(dest, kind, prefix)
588
590
589 def write(name, mode, islink, getdata):
591 def write(name, mode, islink, getdata):
590 if matchfn and not matchfn(name):
592 if matchfn and not matchfn(name):
591 return
593 return
592 data = getdata()
594 data = getdata()
593 if decode:
595 if decode:
594 data = repo.wwritedata(name, data)
596 data = repo.wwritedata(name, data)
595 archiver.addfile(prefix + name, mode, islink, data)
597 archiver.addfile(prefix + name, mode, islink, data)
596
598
597 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
599 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
598
600
599 if repo.ui.configbool("ui", "archivemeta", True):
601 if repo.ui.configbool("ui", "archivemeta", True):
600 def metadata():
602 def metadata():
601 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
603 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
602 hex(repo.changelog.node(0)), hex(node), ctx.branch())
604 hex(repo.changelog.node(0)), hex(node), ctx.branch())
603
605
604 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
606 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
605 if repo.tagtype(t) == 'global')
607 if repo.tagtype(t) == 'global')
606 if not tags:
608 if not tags:
607 repo.ui.pushbuffer()
609 repo.ui.pushbuffer()
608 opts = {'template': '{latesttag}\n{latesttagdistance}',
610 opts = {'template': '{latesttag}\n{latesttagdistance}',
609 'style': '', 'patch': None, 'git': None}
611 'style': '', 'patch': None, 'git': None}
610 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
612 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
611 ltags, dist = repo.ui.popbuffer().split('\n')
613 ltags, dist = repo.ui.popbuffer().split('\n')
612 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
614 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
613 tags += 'latesttagdistance: %s\n' % dist
615 tags += 'latesttagdistance: %s\n' % dist
614
616
615 return base + tags
617 return base + tags
616
618
617 write('.hg_archival.txt', 0644, False, metadata)
619 write('.hg_archival.txt', 0644, False, metadata)
618
620
619 for f in ctx:
621 for f in ctx:
620 ff = ctx.flags(f)
622 ff = ctx.flags(f)
621 getdata = ctx[f].data
623 getdata = ctx[f].data
622 if lfutil.isstandin(f):
624 if lfutil.isstandin(f):
623 path = lfutil.findfile(repo, getdata().strip())
625 path = lfutil.findfile(repo, getdata().strip())
624 f = lfutil.splitstandin(f)
626 f = lfutil.splitstandin(f)
625
627
626 def getdatafn():
628 def getdatafn():
627 try:
629 try:
628 fd = open(path, 'rb')
630 fd = open(path, 'rb')
629 return fd.read()
631 return fd.read()
630 finally:
632 finally:
631 fd.close()
633 fd.close()
632
634
633 getdata = getdatafn
635 getdata = getdatafn
634 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
636 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
635
637
636 if subrepos:
638 if subrepos:
637 for subpath in ctx.substate:
639 for subpath in ctx.substate:
638 sub = ctx.sub(subpath)
640 sub = ctx.sub(subpath)
639 try:
641 try:
640 sub.archive(repo.ui, archiver, prefix)
642 sub.archive(repo.ui, archiver, prefix)
641 except TypeError:
643 except TypeError:
642 sub.archive(archiver, prefix)
644 sub.archive(archiver, prefix)
643
645
644 archiver.done()
646 archiver.done()
645
647
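# In override_archive a standin's content is just the largefile's hash;
# lfutil.findfile turns that hash into a path in the local cache and
# getdatafn reads it. A simplified, self-contained sketch of such a
# content-addressed lookup (the directory layout and helper names here are
# assumptions, not the extension's exact code):
import hashlib
import os

def hashfile(path, blocksize=128 * 1024):
    h = hashlib.sha1()
    fp = open(path, 'rb')
    try:
        while True:
            chunk = fp.read(blocksize)
            if not chunk:
                break
            h.update(chunk)
    finally:
        fp.close()
    return h.hexdigest()

def findincache(cachedir, hash):
    # return the cached copy for this hash, or None if it was never pulled
    path = os.path.join(cachedir, hash)
    return path if os.path.exists(path) else None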
646 # If a lfile is modified the change is not reflected in its standin until a
648 # If a largefile is modified, the change is not reflected in its
647 # commit. cmdutil.bailifchanged raises an exception if the repo has
649 # standin until a commit. cmdutil.bailifchanged() raises an exception
648 # uncommitted changes. Wrap it to also check if lfiles were changed. This is
650 # if the repo has uncommitted changes. Wrap it to also check if
649 # used by bisect and backout.
651 # largefiles were changed. This is used by bisect and backout.
650 def override_bailifchanged(orig, repo):
652 def override_bailifchanged(orig, repo):
651 orig(repo)
653 orig(repo)
652 repo.lfstatus = True
654 repo.lfstatus = True
653 modified, added, removed, deleted = repo.status()[:4]
655 modified, added, removed, deleted = repo.status()[:4]
654 repo.lfstatus = False
656 repo.lfstatus = False
655 if modified or added or removed or deleted:
657 if modified or added or removed or deleted:
656 raise util.Abort(_('outstanding uncommitted changes'))
658 raise util.Abort(_('outstanding uncommitted changes'))
657
659
658 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
660 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
659 def override_fetch(orig, ui, repo, *pats, **opts):
661 def override_fetch(orig, ui, repo, *pats, **opts):
660 repo.lfstatus = True
662 repo.lfstatus = True
661 modified, added, removed, deleted = repo.status()[:4]
663 modified, added, removed, deleted = repo.status()[:4]
662 repo.lfstatus = False
664 repo.lfstatus = False
663 if modified or added or removed or deleted:
665 if modified or added or removed or deleted:
664 raise util.Abort(_('outstanding uncommitted changes'))
666 raise util.Abort(_('outstanding uncommitted changes'))
665 return orig(ui, repo, *pats, **opts)
667 return orig(ui, repo, *pats, **opts)
666
668
667 def override_forget(orig, ui, repo, *pats, **opts):
669 def override_forget(orig, ui, repo, *pats, **opts):
668 installnormalfilesmatchfn(repo[None].manifest())
670 installnormalfilesmatchfn(repo[None].manifest())
669 orig(ui, repo, *pats, **opts)
671 orig(ui, repo, *pats, **opts)
670 restorematchfn()
672 restorematchfn()
671 m = scmutil.match(repo[None], pats, opts)
673 m = scmutil.match(repo[None], pats, opts)
672
674
673 try:
675 try:
674 repo.lfstatus = True
676 repo.lfstatus = True
675 s = repo.status(match=m, clean=True)
677 s = repo.status(match=m, clean=True)
676 finally:
678 finally:
677 repo.lfstatus = False
679 repo.lfstatus = False
678 forget = sorted(s[0] + s[1] + s[3] + s[6])
680 forget = sorted(s[0] + s[1] + s[3] + s[6])
679 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
681 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
680
682
681 for f in forget:
683 for f in forget:
682 if lfutil.standin(f) not in repo.dirstate and not \
684 if lfutil.standin(f) not in repo.dirstate and not \
683 os.path.isdir(m.rel(lfutil.standin(f))):
685 os.path.isdir(m.rel(lfutil.standin(f))):
684 ui.warn(_('not removing %s: file is already untracked\n')
686 ui.warn(_('not removing %s: file is already untracked\n')
685 % m.rel(f))
687 % m.rel(f))
686
688
687 for f in forget:
689 for f in forget:
688 if ui.verbose or not m.exact(f):
690 if ui.verbose or not m.exact(f):
689 ui.status(_('removing %s\n') % m.rel(f))
691 ui.status(_('removing %s\n') % m.rel(f))
690
692
691 # Need to lock because standin files are deleted then removed from the
693 # Need to lock because standin files are deleted then removed from the
692 # repository and we could race in between.
694 # repository and we could race in between.
693 wlock = repo.wlock()
695 wlock = repo.wlock()
694 try:
696 try:
695 lfdirstate = lfutil.openlfdirstate(ui, repo)
697 lfdirstate = lfutil.openlfdirstate(ui, repo)
696 for f in forget:
698 for f in forget:
697 if lfdirstate[f] == 'a':
699 if lfdirstate[f] == 'a':
698 lfdirstate.drop(f)
700 lfdirstate.drop(f)
699 else:
701 else:
700 lfdirstate.remove(f)
702 lfdirstate.remove(f)
701 lfdirstate.write()
703 lfdirstate.write()
702 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
704 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
703 unlink=True)
705 unlink=True)
704 finally:
706 finally:
705 wlock.release()
707 wlock.release()
706
708
707 def getoutgoinglfiles(ui, repo, dest=None, **opts):
709 def getoutgoinglfiles(ui, repo, dest=None, **opts):
708 dest = ui.expandpath(dest or 'default-push', dest or 'default')
710 dest = ui.expandpath(dest or 'default-push', dest or 'default')
709 dest, branches = hg.parseurl(dest, opts.get('branch'))
711 dest, branches = hg.parseurl(dest, opts.get('branch'))
710 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
712 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
711 if revs:
713 if revs:
712 revs = [repo.lookup(rev) for rev in revs]
714 revs = [repo.lookup(rev) for rev in revs]
713
715
714 remoteui = hg.remoteui
716 remoteui = hg.remoteui
715
717
716 try:
718 try:
717 remote = hg.repository(remoteui(repo, opts), dest)
719 remote = hg.repository(remoteui(repo, opts), dest)
718 except error.RepoError:
720 except error.RepoError:
719 return None
721 return None
720 o = lfutil.findoutgoing(repo, remote, False)
722 o = lfutil.findoutgoing(repo, remote, False)
721 if not o:
723 if not o:
722 return None
724 return None
723 o = repo.changelog.nodesbetween(o, revs)[0]
725 o = repo.changelog.nodesbetween(o, revs)[0]
724 if opts.get('newest_first'):
726 if opts.get('newest_first'):
725 o.reverse()
727 o.reverse()
726
728
727 toupload = set()
729 toupload = set()
728 for n in o:
730 for n in o:
729 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
731 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
730 ctx = repo[n]
732 ctx = repo[n]
731 files = set(ctx.files())
733 files = set(ctx.files())
732 if len(parents) == 2:
734 if len(parents) == 2:
733 mc = ctx.manifest()
735 mc = ctx.manifest()
734 mp1 = ctx.parents()[0].manifest()
736 mp1 = ctx.parents()[0].manifest()
735 mp2 = ctx.parents()[1].manifest()
737 mp2 = ctx.parents()[1].manifest()
736 for f in mp1:
738 for f in mp1:
737 if f not in mc:
739 if f not in mc:
738 files.add(f)
740 files.add(f)
739 for f in mp2:
741 for f in mp2:
740 if f not in mc:
742 if f not in mc:
741 files.add(f)
743 files.add(f)
742 for f in mc:
744 for f in mc:
743 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
745 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
744 files.add(f)
746 files.add(f)
745 toupload = toupload.union(set([f for f in files if lfutil.isstandin(f)\
747 toupload = toupload.union(set([f for f in files if lfutil.isstandin(f)\
746 and f in ctx]))
748 and f in ctx]))
747 return toupload
749 return toupload
748
750
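# For merge changesets (two parents) ctx.files() alone is not sufficient,
# so the loop above widens the set: files dropped relative to either
# parent, plus files whose node differs from either parent's copy. The same
# computation over plain dicts -- a conservative over-approximation that
# mirrors the code above:
def mergechanged(mc, mp1, mp2, reported=()):
    # mc, mp1, mp2: {filename: nodeid} manifests of the merge and its parents
    files = set(reported)
    files.update(f for f in mp1 if f not in mc)
    files.update(f for f in mp2 if f not in mc)
    files.update(f for f in mc
                 if mc[f] != mp1.get(f) or mc[f] != mp2.get(f))
    return files

assert mergechanged({'a': 1, 'b': 2}, {'a': 1, 'c': 3}, {'a': 9, 'b': 2}) \
    == set(['a', 'b', 'c'])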
749 def override_outgoing(orig, ui, repo, dest=None, **opts):
751 def override_outgoing(orig, ui, repo, dest=None, **opts):
750 orig(ui, repo, dest, **opts)
752 orig(ui, repo, dest, **opts)
751
753
752 if opts.pop('large', None):
754 if opts.pop('large', None):
753 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
755 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
754 if toupload is None:
756 if toupload is None:
755 ui.status(_('largefiles: No remote repo\n'))
757 ui.status(_('largefiles: No remote repo\n'))
756 else:
758 else:
757 ui.status(_('largefiles to upload:\n'))
759 ui.status(_('largefiles to upload:\n'))
758 for file in toupload:
760 for file in toupload:
759 ui.status(lfutil.splitstandin(file) + '\n')
761 ui.status(lfutil.splitstandin(file) + '\n')
760 ui.status('\n')
762 ui.status('\n')
761
763
762 def override_summary(orig, ui, repo, *pats, **opts):
764 def override_summary(orig, ui, repo, *pats, **opts):
763 orig(ui, repo, *pats, **opts)
765 orig(ui, repo, *pats, **opts)
764
766
765 if opts.pop('large', None):
767 if opts.pop('large', None):
766 toupload = getoutgoinglfiles(ui, repo, None, **opts)
768 toupload = getoutgoinglfiles(ui, repo, None, **opts)
767 if toupload is None:
769 if toupload is None:
768 ui.status(_('largefiles: No remote repo\n'))
770 ui.status(_('largefiles: No remote repo\n'))
769 else:
771 else:
770 ui.status(_('largefiles: %d to upload\n') % len(toupload))
772 ui.status(_('largefiles: %d to upload\n') % len(toupload))
771
773
772 def override_addremove(orig, ui, repo, *pats, **opts):
774 def override_addremove(orig, ui, repo, *pats, **opts):
773 # Check if the parent or child has lfiles if they do don't allow it. If
775 # Check if the parent or child has largefiles; if so, disallow
774 # there is a symlink in the manifest then getting the manifest throws an
776 # addremove. If there is a symlink in the manifest then getting
775 # exception catch it and let addremove deal with it. This happens in
777 # the manifest throws an exception: catch it and let addremove
776 # Mercurial's test test-addremove-symlink
778 # deal with it.
777 try:
779 try:
778 manifesttip = set(repo['tip'].manifest())
780 manifesttip = set(repo['tip'].manifest())
779 except util.Abort:
781 except util.Abort:
780 manifesttip = set()
782 manifesttip = set()
781 try:
783 try:
782 manifestworking = set(repo[None].manifest())
784 manifestworking = set(repo[None].manifest())
783 except util.Abort:
785 except util.Abort:
784 manifestworking = set()
786 manifestworking = set()
785
787
786 # Manifests are only iterable so turn them into sets then union
788 # Manifests are only iterable so turn them into sets then union
787 for file in manifesttip.union(manifestworking):
789 for file in manifesttip.union(manifestworking):
788 if file.startswith(lfutil.shortname):
790 if file.startswith(lfutil.shortname):
789 raise util.Abort(
791 raise util.Abort(
790 _('addremove cannot be run on a repo with largefiles'))
792 _('addremove cannot be run on a repo with largefiles'))
791
793
792 return orig(ui, repo, *pats, **opts)
794 return orig(ui, repo, *pats, **opts)
793
795
794 # Calling purge with --all will cause the lfiles to be deleted.
796 # Calling purge with --all will cause the largefiles to be deleted.
795 # Override repo.status to prevent this from happening.
797 # Override repo.status to prevent this from happening.
796 def override_purge(orig, ui, repo, *dirs, **opts):
798 def override_purge(orig, ui, repo, *dirs, **opts):
797 oldstatus = repo.status
799 oldstatus = repo.status
798 def override_status(node1='.', node2=None, match=None, ignored=False,
800 def override_status(node1='.', node2=None, match=None, ignored=False,
799 clean=False, unknown=False, listsubrepos=False):
801 clean=False, unknown=False, listsubrepos=False):
800 r = oldstatus(node1, node2, match, ignored, clean, unknown,
802 r = oldstatus(node1, node2, match, ignored, clean, unknown,
801 listsubrepos)
803 listsubrepos)
802 lfdirstate = lfutil.openlfdirstate(ui, repo)
804 lfdirstate = lfutil.openlfdirstate(ui, repo)
803 modified, added, removed, deleted, unknown, ignored, clean = r
805 modified, added, removed, deleted, unknown, ignored, clean = r
804 unknown = [f for f in unknown if lfdirstate[f] == '?']
806 unknown = [f for f in unknown if lfdirstate[f] == '?']
805 ignored = [f for f in ignored if lfdirstate[f] == '?']
807 ignored = [f for f in ignored if lfdirstate[f] == '?']
806 return modified, added, removed, deleted, unknown, ignored, clean
808 return modified, added, removed, deleted, unknown, ignored, clean
807 repo.status = override_status
809 repo.status = override_status
808 orig(ui, repo, *dirs, **opts)
810 orig(ui, repo, *dirs, **opts)
809 repo.status = oldstatus
811 repo.status = oldstatus
810
812
811 def override_rollback(orig, ui, repo, **opts):
813 def override_rollback(orig, ui, repo, **opts):
812 result = orig(ui, repo, **opts)
814 result = orig(ui, repo, **opts)
813 merge.update(repo, node=None, branchmerge=False, force=True,
815 merge.update(repo, node=None, branchmerge=False, force=True,
814 partial=lfutil.isstandin)
816 partial=lfutil.isstandin)
815 lfdirstate = lfutil.openlfdirstate(ui, repo)
817 lfdirstate = lfutil.openlfdirstate(ui, repo)
816 lfiles = lfutil.listlfiles(repo)
818 lfiles = lfutil.listlfiles(repo)
817 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
819 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
818 for file in lfiles:
820 for file in lfiles:
819 if file in oldlfiles:
821 if file in oldlfiles:
820 lfdirstate.normallookup(file)
822 lfdirstate.normallookup(file)
821 else:
823 else:
822 lfdirstate.add(file)
824 lfdirstate.add(file)
823 lfdirstate.write()
825 lfdirstate.write()
824 return result
826 return result
@@ -1,412 +1,412 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import types
11 import types
12 import os
12 import os
13 import re
13 import re
14
14
15 from mercurial import context, error, manifest, match as match_, \
15 from mercurial import context, error, manifest, match as match_, \
16 node, util
16 node, util
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 import lfcommands
19 import lfcommands
20 import proto
20 import proto
21 import lfutil
21 import lfutil
22
22
23 def reposetup(ui, repo):
23 def reposetup(ui, repo):
24 # wire repositories should be given new wireproto functions but not the
24 # wire repositories should be given new wireproto functions but not the
25 # other largefiles modifications
25 # other largefiles modifications
26 if not repo.local():
26 if not repo.local():
27 return proto.wirereposetup(ui, repo)
27 return proto.wirereposetup(ui, repo)
28
28
29 for name in ('status', 'commitctx', 'commit', 'push'):
29 for name in ('status', 'commitctx', 'commit', 'push'):
30 method = getattr(repo, name)
30 method = getattr(repo, name)
31 #if not (isinstance(method, types.MethodType) and
31 #if not (isinstance(method, types.MethodType) and
32 # method.im_func is repo.__class__.commitctx.im_func):
32 # method.im_func is repo.__class__.commitctx.im_func):
33 if isinstance(method, types.FunctionType) and method.func_name == \
33 if isinstance(method, types.FunctionType) and method.func_name == \
34 'wrap':
34 'wrap':
35 ui.warn(_('largefiles: repo method %r appears to have already been'
35 ui.warn(_('largefiles: repo method %r appears to have already been'
36 ' wrapped by another extension: '
36 ' wrapped by another extension: '
37 'largefiles may behave incorrectly\n')
37 'largefiles may behave incorrectly\n')
38 % name)
38 % name)
39
39
40 class lfiles_repo(repo.__class__):
40 class lfiles_repo(repo.__class__):
41 lfstatus = False
41 lfstatus = False
42 def status_nolfiles(self, *args, **kwargs):
42 def status_nolfiles(self, *args, **kwargs):
43 return super(lfiles_repo, self).status(*args, **kwargs)
43 return super(lfiles_repo, self).status(*args, **kwargs)
44
44
45 # When lfstatus is set, return a context that gives the names
45 # When lfstatus is set, return a context that gives the names
46 # of largefiles instead of their corresponding standins and
46 # of largefiles instead of their corresponding standins and
47 # identifies the largefiles as always binary, regardless of
47 # identifies the largefiles as always binary, regardless of
48 # their actual contents.
48 # their actual contents.
49 def __getitem__(self, changeid):
49 def __getitem__(self, changeid):
50 ctx = super(lfiles_repo, self).__getitem__(changeid)
50 ctx = super(lfiles_repo, self).__getitem__(changeid)
51 if self.lfstatus:
51 if self.lfstatus:
52 class lfiles_manifestdict(manifest.manifestdict):
52 class lfiles_manifestdict(manifest.manifestdict):
53 def __contains__(self, filename):
53 def __contains__(self, filename):
54 if super(lfiles_manifestdict,
54 if super(lfiles_manifestdict,
55 self).__contains__(filename):
55 self).__contains__(filename):
56 return True
56 return True
57 return super(lfiles_manifestdict,
57 return super(lfiles_manifestdict,
58 self).__contains__(lfutil.shortname+'/' + filename)
58 self).__contains__(lfutil.shortname+'/' + filename)
59 class lfiles_ctx(ctx.__class__):
59 class lfiles_ctx(ctx.__class__):
60 def files(self):
60 def files(self):
61 filenames = super(lfiles_ctx, self).files()
61 filenames = super(lfiles_ctx, self).files()
62 return [re.sub('^\\'+lfutil.shortname+'/', '',
62 return [re.sub('^\\'+lfutil.shortname+'/', '',
63 filename) for filename in filenames]
63 filename) for filename in filenames]
64 def manifest(self):
64 def manifest(self):
65 man1 = super(lfiles_ctx, self).manifest()
65 man1 = super(lfiles_ctx, self).manifest()
66 man1.__class__ = lfiles_manifestdict
66 man1.__class__ = lfiles_manifestdict
67 return man1
67 return man1
68 def filectx(self, path, fileid=None, filelog=None):
68 def filectx(self, path, fileid=None, filelog=None):
69 try:
69 try:
70 result = super(lfiles_ctx, self).filectx(path,
70 result = super(lfiles_ctx, self).filectx(path,
71 fileid, filelog)
71 fileid, filelog)
72 except error.LookupError:
72 except error.LookupError:
73 # Adding a null character will cause Mercurial to
73 # Adding a null character will cause Mercurial to
74 # identify this as a binary file.
74 # identify this as a binary file.
75 result = super(lfiles_ctx, self).filectx(
75 result = super(lfiles_ctx, self).filectx(
76 lfutil.shortname + '/' + path, fileid,
76 lfutil.shortname + '/' + path, fileid,
77 filelog)
77 filelog)
78 olddata = result.data
78 olddata = result.data
79 result.data = lambda: olddata() + '\0'
79 result.data = lambda: olddata() + '\0'
80 return result
80 return result
81 ctx.__class__ = lfiles_ctx
81 ctx.__class__ = lfiles_ctx
82 return ctx
82 return ctx
83
83
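# The extension layers behaviour onto live objects by swapping in a
# dynamically created subclass (ctx.__class__ = lfiles_ctx above,
# man1.__class__ = lfiles_manifestdict, and repo.__class__ = lfiles_repo
# later). A tiny standalone demonstration of that pattern, with
# illustrative names:
class Plain(object):
    def files(self):
        return ['.hglf/big.bin', 'small.txt']

class Layered(Plain):
    def files(self):
        # strip the standin prefix, keep everything else unchanged
        prefix = '.hglf/'
        return [f[len(prefix):] if f.startswith(prefix) else f
                for f in super(Layered, self).files()]

obj = Plain()
obj.__class__ = Layered            # same instance, augmented behaviour
assert obj.files() == ['big.bin', 'small.txt']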
84 # Figure out the status of big files and insert them into the
84 # Figure out the status of big files and insert them into the
85 # appropriate list in the result. Also removes standin files
85 # appropriate list in the result. Also removes standin files
86 # from the listing. Revert to the original status if
86 # from the listing. Revert to the original status if
87 # self.lfstatus is False.
87 # self.lfstatus is False.
88 def status(self, node1='.', node2=None, match=None, ignored=False,
88 def status(self, node1='.', node2=None, match=None, ignored=False,
89 clean=False, unknown=False, listsubrepos=False):
89 clean=False, unknown=False, listsubrepos=False):
90 listignored, listclean, listunknown = ignored, clean, unknown
90 listignored, listclean, listunknown = ignored, clean, unknown
91 if not self.lfstatus:
91 if not self.lfstatus:
92 try:
92 try:
93 return super(lfiles_repo, self).status(node1, node2, match,
93 return super(lfiles_repo, self).status(node1, node2, match,
94 listignored, listclean, listunknown, listsubrepos)
94 listignored, listclean, listunknown, listsubrepos)
95 except TypeError:
95 except TypeError:
96 return super(lfiles_repo, self).status(node1, node2, match,
96 return super(lfiles_repo, self).status(node1, node2, match,
97 listignored, listclean, listunknown)
97 listignored, listclean, listunknown)
98 else:
98 else:
99 # some calls in this function rely on the old version of status
99 # some calls in this function rely on the old version of status
100 self.lfstatus = False
100 self.lfstatus = False
101 if isinstance(node1, context.changectx):
101 if isinstance(node1, context.changectx):
102 ctx1 = node1
102 ctx1 = node1
103 else:
103 else:
104 ctx1 = repo[node1]
104 ctx1 = repo[node1]
105 if isinstance(node2, context.changectx):
105 if isinstance(node2, context.changectx):
106 ctx2 = node2
106 ctx2 = node2
107 else:
107 else:
108 ctx2 = repo[node2]
108 ctx2 = repo[node2]
109 working = ctx2.rev() is None
109 working = ctx2.rev() is None
110 parentworking = working and ctx1 == self['.']
110 parentworking = working and ctx1 == self['.']
111
111
112 def inctx(file, ctx):
112 def inctx(file, ctx):
113 try:
113 try:
114 if ctx.rev() is None:
114 if ctx.rev() is None:
115 return file in ctx.manifest()
115 return file in ctx.manifest()
116 ctx[file]
116 ctx[file]
117 return True
117 return True
118 except KeyError:
118 except KeyError:
119 return False
119 return False
120
120
121 # create a copy of match that matches standins instead of
122 # lfiles if matcher not set then it is the always matcher so
123 # overwrite that
124 if match is None:
121 if match is None:
125 match = match_.always(self.root, self.getcwd())
122 match = match_.always(self.root, self.getcwd())
126
123
124 # Create a copy of match that matches standins instead
125 # of largefiles.
127 def tostandin(file):
126 def tostandin(file):
128 if inctx(lfutil.standin(file), ctx2):
127 if inctx(lfutil.standin(file), ctx2):
129 return lfutil.standin(file)
128 return lfutil.standin(file)
130 return file
129 return file
131
130
132 m = copy.copy(match)
131 m = copy.copy(match)
133 m._files = [tostandin(f) for f in m._files]
132 m._files = [tostandin(f) for f in m._files]
134
133
135 # get ignored, clean, and unknown but remove them
134 # get ignored, clean, and unknown but remove them
136 # later if they were not asked for
135 # later if they were not asked for
137 try:
136 try:
138 result = super(lfiles_repo, self).status(node1, node2, m,
137 result = super(lfiles_repo, self).status(node1, node2, m,
139 True, True, True, listsubrepos)
138 True, True, True, listsubrepos)
140 except TypeError:
139 except TypeError:
141 result = super(lfiles_repo, self).status(node1, node2, m,
140 result = super(lfiles_repo, self).status(node1, node2, m,
142 True, True, True)
141 True, True, True)
143 if working:
142 if working:
144 # hold the wlock while we read largefiles and
143 # hold the wlock while we read largefiles and
145 # update the lfdirstate
144 # update the lfdirstate
146 wlock = repo.wlock()
145 wlock = repo.wlock()
147 try:
146 try:
148 # Any non-largefiles that were explicitly listed must be
147 # Any non-largefiles that were explicitly listed must be
149 # taken out or lfdirstate.status will report an error.
148 # taken out or lfdirstate.status will report an error.
150 # The status of these files was already computed using
149 # The status of these files was already computed using
151 # super's status.
150 # super's status.
152 lfdirstate = lfutil.openlfdirstate(ui, self)
151 lfdirstate = lfutil.openlfdirstate(ui, self)
153 match._files = [f for f in match._files if f in
152 match._files = [f for f in match._files if f in
154 lfdirstate]
153 lfdirstate]
155 s = lfdirstate.status(match, [], listignored,
154 s = lfdirstate.status(match, [], listignored,
156 listclean, listunknown)
155 listclean, listunknown)
157 (unsure, modified, added, removed, missing, unknown,
156 (unsure, modified, added, removed, missing, unknown,
158 ignored, clean) = s
157 ignored, clean) = s
159 if parentworking:
158 if parentworking:
160 for lfile in unsure:
159 for lfile in unsure:
161 if ctx1[lfutil.standin(lfile)].data().strip() \
160 if ctx1[lfutil.standin(lfile)].data().strip() \
162 != lfutil.hashfile(self.wjoin(lfile)):
161 != lfutil.hashfile(self.wjoin(lfile)):
163 modified.append(lfile)
162 modified.append(lfile)
164 else:
163 else:
165 clean.append(lfile)
164 clean.append(lfile)
166 lfdirstate.normal(lfile)
165 lfdirstate.normal(lfile)
167 lfdirstate.write()
166 lfdirstate.write()
168 else:
167 else:
169 tocheck = unsure + modified + added + clean
168 tocheck = unsure + modified + added + clean
170 modified, added, clean = [], [], []
169 modified, added, clean = [], [], []
171
170
172 for lfile in tocheck:
171 for lfile in tocheck:
173 standin = lfutil.standin(lfile)
172 standin = lfutil.standin(lfile)
174 if inctx(standin, ctx1):
173 if inctx(standin, ctx1):
175 if ctx1[standin].data().strip() != \
174 if ctx1[standin].data().strip() != \
176 lfutil.hashfile(self.wjoin(lfile)):
175 lfutil.hashfile(self.wjoin(lfile)):
177 modified.append(lfile)
176 modified.append(lfile)
178 else:
177 else:
179 clean.append(lfile)
178 clean.append(lfile)
180 else:
179 else:
181 added.append(lfile)
180 added.append(lfile)
182 finally:
181 finally:
183 wlock.release()
182 wlock.release()
184
183
185 for standin in ctx1.manifest():
184 for standin in ctx1.manifest():
186 if not lfutil.isstandin(standin):
185 if not lfutil.isstandin(standin):
187 continue
186 continue
188 lfile = lfutil.splitstandin(standin)
187 lfile = lfutil.splitstandin(standin)
189 if not match(lfile):
188 if not match(lfile):
190 continue
189 continue
191 if lfile not in lfdirstate:
190 if lfile not in lfdirstate:
192 removed.append(lfile)
191 removed.append(lfile)
193 # Handle unknown and ignored differently
192 # Handle unknown and ignored differently
194 lfiles = (modified, added, removed, missing, [], [], clean)
193 lfiles = (modified, added, removed, missing, [], [], clean)
195 result = list(result)
194 result = list(result)
196 # Unknown files
195 # Unknown files
197 result[4] = [f for f in unknown if repo.dirstate[f] == '?'\
196 result[4] = [f for f in unknown if repo.dirstate[f] == '?'\
198 and not lfutil.isstandin(f)]
197 and not lfutil.isstandin(f)]
199 # Ignored files must be ignored by both the dirstate and
198 # Ignored files must be ignored by both the dirstate and
200 # lfdirstate
199 # lfdirstate
201 result[5] = set(ignored).intersection(set(result[5]))
200 result[5] = set(ignored).intersection(set(result[5]))
202 # combine normal files and lfiles
201 # combine normal files and largefiles
203 normals = [[fn for fn in filelist if not \
202 normals = [[fn for fn in filelist if not \
204 lfutil.isstandin(fn)] for filelist in result]
203 lfutil.isstandin(fn)] for filelist in result]
205 result = [sorted(list1 + list2) for (list1, list2) in \
204 result = [sorted(list1 + list2) for (list1, list2) in \
206 zip(normals, lfiles)]
205 zip(normals, lfiles)]
207 else:
206 else:
208 def toname(f):
207 def toname(f):
209 if lfutil.isstandin(f):
208 if lfutil.isstandin(f):
210 return lfutil.splitstandin(f)
209 return lfutil.splitstandin(f)
211 return f
210 return f
212 result = [[toname(f) for f in items] for items in result]
211 result = [[toname(f) for f in items] for items in result]
213
212
214 if not listunknown:
213 if not listunknown:
215 result[4] = []
214 result[4] = []
216 if not listignored:
215 if not listignored:
217 result[5] = []
216 result[5] = []
218 if not listclean:
217 if not listclean:
219 result[6] = []
218 result[6] = []
220 self.lfstatus = True
219 self.lfstatus = True
221 return result
220 return result
222
221
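# status() above finishes by merging two seven-slot results -- the normal
# files from the superclass (with standins filtered out) and the largefile
# lists built from lfdirstate -- category by category. A sketch of that
# final combine step over plain lists:
def combinestatus(normals, lfiles):
    # both arguments: (modified, added, removed, deleted, unknown,
    # ignored, clean), each a list of filenames
    return [sorted(list1 + list2) for list1, list2 in zip(normals, lfiles)]

normals = [['a.txt'], [], [], [], ['junk'], [], ['c.txt']]
lfiles = [['big.bin'], [], [], [], [], [], []]
assert combinestatus(normals, lfiles)[0] == ['a.txt', 'big.bin']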
223 # This call happens after a commit has occurred. Copy all of the lfiles
222 # As part of committing, copy all of the largefiles into the
224 # into the cache
223 # cache.
225 def commitctx(self, *args, **kwargs):
224 def commitctx(self, *args, **kwargs):
226 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
225 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
227 ctx = self[node]
226 ctx = self[node]
228 for filename in ctx.files():
227 for filename in ctx.files():
229 if lfutil.isstandin(filename) and filename in ctx.manifest():
228 if lfutil.isstandin(filename) and filename in ctx.manifest():
230 realfile = lfutil.splitstandin(filename)
229 realfile = lfutil.splitstandin(filename)
231 lfutil.copytocache(self, ctx.node(), realfile)
230 lfutil.copytocache(self, ctx.node(), realfile)
232
231
233 return node
232 return node
234
233
235 # This call happens before a commit has occurred. The lfile standins
234 # Before commit, largefile standins have not had their
236 # have not had their contents updated (to reflect the hash of their
235 # contents updated to reflect the hash of their largefile.
237 # lfile). Do that here.
236 # Do that here.
238 def commit(self, text="", user=None, date=None, match=None,
237 def commit(self, text="", user=None, date=None, match=None,
239 force=False, editor=False, extra={}):
238 force=False, editor=False, extra={}):
240 orig = super(lfiles_repo, self).commit
239 orig = super(lfiles_repo, self).commit
241
240
242 wlock = repo.wlock()
241 wlock = repo.wlock()
243 try:
242 try:
244 if getattr(repo, "_isrebasing", False):
243 if getattr(repo, "_isrebasing", False):
245 # We have to take the time to pull down the new lfiles now.
244 # We have to take the time to pull down the new
246 # Otherwise if we are rebasing, any lfiles that were
245 # largefiles now. Otherwise if we are rebasing,
247 # modified in the changesets we are rebasing on top of get
246 # any largefiles that were modified in the
248 # overwritten either by the rebase or in the first commit
247 # destination changesets get overwritten, either
249 # after the rebase.
248 # by the rebase or in the first commit after the
249 # rebase.
250 lfcommands.updatelfiles(repo.ui, repo)
250 lfcommands.updatelfiles(repo.ui, repo)
251 # Case 1: user calls commit with no specific files or
251 # Case 1: user calls commit with no specific files or
252 # include/exclude patterns: refresh and commit all files that
252 # include/exclude patterns: refresh and commit all files that
253 # are "dirty".
253 # are "dirty".
254 if (match is None) or (not match.anypats() and not \
254 if (match is None) or (not match.anypats() and not \
255 match.files()):
255 match.files()):
256 # Spend a bit of time here to get a list of files we know
256 # Spend a bit of time here to get a list of files we know
257 # are modified so we can compare only against those.
257 # are modified so we can compare only against those.
258 # It can cost a lot of time (several seconds)
258 # It can cost a lot of time (several seconds)
259 # otherwise to update all standins if the largefiles are
259 # otherwise to update all standins if the largefiles are
260 # large.
260 # large.
261 lfdirstate = lfutil.openlfdirstate(ui, self)
261 lfdirstate = lfutil.openlfdirstate(ui, self)
262 dirtymatch = match_.always(repo.root, repo.getcwd())
262 dirtymatch = match_.always(repo.root, repo.getcwd())
263 s = lfdirstate.status(dirtymatch, [], False, False, False)
263 s = lfdirstate.status(dirtymatch, [], False, False, False)
264 modifiedfiles = []
264 modifiedfiles = []
265 for i in s:
265 for i in s:
266 modifiedfiles.extend(i)
266 modifiedfiles.extend(i)
267 lfiles = lfutil.listlfiles(self)
267 lfiles = lfutil.listlfiles(self)
268 # this only loops through lfiles that exist (not
268 # this only loops through largefiles that exist (not
269 # removed/renamed)
269 # removed/renamed)
270 for lfile in lfiles:
270 for lfile in lfiles:
271 if lfile in modifiedfiles:
271 if lfile in modifiedfiles:
272 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
272 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
273 # this handles the case where a rebase is being
273 # this handles the case where a rebase is being
274 # performed and the working copy is not updated
274 # performed and the working copy is not updated
275 # yet.
275 # yet.
276 if os.path.exists(self.wjoin(lfile)):
276 if os.path.exists(self.wjoin(lfile)):
277 lfutil.updatestandin(self,
277 lfutil.updatestandin(self,
278 lfutil.standin(lfile))
278 lfutil.standin(lfile))
279 lfdirstate.normal(lfile)
279 lfdirstate.normal(lfile)
280 for lfile in lfdirstate:
280 for lfile in lfdirstate:
281 if lfile in modifiedfiles:
281 if lfile in modifiedfiles:
282 if not os.path.exists(
282 if not os.path.exists(
283 repo.wjoin(lfutil.standin(lfile))):
283 repo.wjoin(lfutil.standin(lfile))):
284 lfdirstate.drop(lfile)
284 lfdirstate.drop(lfile)
285 lfdirstate.write()
285 lfdirstate.write()
286
286
287 return orig(text=text, user=user, date=date, match=match,
287 return orig(text=text, user=user, date=date, match=match,
288 force=force, editor=editor, extra=extra)
288 force=force, editor=editor, extra=extra)
289
289
290 for f in match.files():
290 for f in match.files():
291 if lfutil.isstandin(f):
291 if lfutil.isstandin(f):
292 raise util.Abort(
292 raise util.Abort(
293 _('file "%s" is a largefile standin') % f,
293 _('file "%s" is a largefile standin') % f,
294 hint=('commit the largefile itself instead'))
294 hint=('commit the largefile itself instead'))
295
295
296 # Case 2: user calls commit with specified patterns: refresh
296 # Case 2: user calls commit with specified patterns: refresh
297 # any matching big files.
297 # any matching big files.
298 smatcher = lfutil.composestandinmatcher(self, match)
298 smatcher = lfutil.composestandinmatcher(self, match)
299 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
299 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
300
300
301 # No matching big files: get out of the way and pass control to
301 # No matching big files: get out of the way and pass control to
302 # the usual commit() method.
302 # the usual commit() method.
303 if not standins:
303 if not standins:
304 return orig(text=text, user=user, date=date, match=match,
304 return orig(text=text, user=user, date=date, match=match,
305 force=force, editor=editor, extra=extra)
305 force=force, editor=editor, extra=extra)
306
306
307 # Refresh all matching big files. It's possible that the
307 # Refresh all matching big files. It's possible that the
308 # commit will end up failing, in which case the big files will
308 # commit will end up failing, in which case the big files will
309 # stay refreshed. No harm done: the user modified them and
309 # stay refreshed. No harm done: the user modified them and
310 # asked to commit them, so sooner or later we're going to
310 # asked to commit them, so sooner or later we're going to
311 # refresh the standins. Might as well leave them refreshed.
311 # refresh the standins. Might as well leave them refreshed.
312 lfdirstate = lfutil.openlfdirstate(ui, self)
312 lfdirstate = lfutil.openlfdirstate(ui, self)
313 for standin in standins:
313 for standin in standins:
314 lfile = lfutil.splitstandin(standin)
314 lfile = lfutil.splitstandin(standin)
315 if lfdirstate[lfile] != 'r':
315 if lfdirstate[lfile] != 'r':
316 lfutil.updatestandin(self, standin)
316 lfutil.updatestandin(self, standin)
317 lfdirstate.normal(lfile)
317 lfdirstate.normal(lfile)
318 else:
318 else:
319 lfdirstate.drop(lfile)
319 lfdirstate.drop(lfile)
320 lfdirstate.write()
320 lfdirstate.write()
321
321
322 # Cook up a new matcher that only matches regular files or
322 # Cook up a new matcher that only matches regular files or
323 # standins corresponding to the big files requested by the
323 # standins corresponding to the big files requested by the
324 # user. Have to modify _files to prevent commit() from
324 # user. Have to modify _files to prevent commit() from
325 # complaining "not tracked" for big files.
325 # complaining "not tracked" for big files.
326 lfiles = lfutil.listlfiles(repo)
326 lfiles = lfutil.listlfiles(repo)
327 match = copy.copy(match)
327 match = copy.copy(match)
328 orig_matchfn = match.matchfn
328 orig_matchfn = match.matchfn
329
329
330 # Check both the list of lfiles and the list of standins
330 # Check both the list of largefiles and the list of
331 # because if a lfile was removed, it won't be in the list of
331 # standins because if a largefile was removed, it
332 # lfiles at this point
332 # won't be in the list of largefiles at this point
333 match._files += sorted(standins)
333 match._files += sorted(standins)
334
334
335 actualfiles = []
335 actualfiles = []
336 for f in match._files:
336 for f in match._files:
337 fstandin = lfutil.standin(f)
337 fstandin = lfutil.standin(f)
338
338
339 # ignore known largefiles and standins
339 # ignore known largefiles and standins
340 if f in lfiles or fstandin in standins:
340 if f in lfiles or fstandin in standins:
341 continue
341 continue
342
342
343 # append directory separator to avoid collisions
343 # append directory separator to avoid collisions
344 if not fstandin.endswith(os.sep):
344 if not fstandin.endswith(os.sep):
345 fstandin += os.sep
345 fstandin += os.sep
346
346
347 # prevalidate matching standin directories
347 # prevalidate matching standin directories
348 if lfutil.any_(st for st in match._files if \
348 if lfutil.any_(st for st in match._files if \
349 st.startswith(fstandin)):
349 st.startswith(fstandin)):
350 continue
350 continue
351 actualfiles.append(f)
351 actualfiles.append(f)
352 match._files = actualfiles
352 match._files = actualfiles
353
353
354 def matchfn(f):
354 def matchfn(f):
355 if orig_matchfn(f):
355 if orig_matchfn(f):
356 return f not in lfiles
356 return f not in lfiles
357 else:
357 else:
358 return f in standins
358 return f in standins
359
359
360 match.matchfn = matchfn
360 match.matchfn = matchfn
361 return orig(text=text, user=user, date=date, match=match,
361 return orig(text=text, user=user, date=date, match=match,
362 force=force, editor=editor, extra=extra)
362 force=force, editor=editor, extra=extra)
363 finally:
363 finally:
364 wlock.release()
364 wlock.release()
365
365
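# The matcher cooked up at the end of commit() keeps the original answer
# for regular files but routes largefiles through their standins. The same
# composition as a standalone helper (illustrative names):
def composematchfn(orig_matchfn, lfiles, standins):
    lfiles = set(lfiles)
    standins = set(standins)
    def matchfn(f):
        if orig_matchfn(f):
            return f not in lfiles      # plain files keep the original result
        return f in standins            # largefiles only match via standins
    return matchfn

match = composematchfn(lambda f: not f.startswith('.hglf/'),
                       ['big.bin'], ['.hglf/big.bin'])
assert match('small.txt')
assert not match('big.bin')
assert match('.hglf/big.bin')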
366 def push(self, remote, force=False, revs=None, newbranch=False):
366 def push(self, remote, force=False, revs=None, newbranch=False):
367 o = lfutil.findoutgoing(repo, remote, force)
367 o = lfutil.findoutgoing(repo, remote, force)
368 if o:
368 if o:
369 toupload = set()
369 toupload = set()
370 o = repo.changelog.nodesbetween(o, revs)[0]
370 o = repo.changelog.nodesbetween(o, revs)[0]
371 for n in o:
371 for n in o:
372 parents = [p for p in repo.changelog.parents(n) if p != \
372 parents = [p for p in repo.changelog.parents(n) if p != \
373 node.nullid]
373 node.nullid]
374 ctx = repo[n]
374 ctx = repo[n]
375 files = set(ctx.files())
375 files = set(ctx.files())
376 if len(parents) == 2:
376 if len(parents) == 2:
377 mc = ctx.manifest()
377 mc = ctx.manifest()
378 mp1 = ctx.parents()[0].manifest()
378 mp1 = ctx.parents()[0].manifest()
379 mp2 = ctx.parents()[1].manifest()
379 mp2 = ctx.parents()[1].manifest()
380 for f in mp1:
380 for f in mp1:
381 if f not in mc:
381 if f not in mc:
382 files.add(f)
382 files.add(f)
383 for f in mp2:
383 for f in mp2:
384 if f not in mc:
384 if f not in mc:
385 files.add(f)
385 files.add(f)
386 for f in mc:
386 for f in mc:
387 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
387 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
388 None):
388 None):
389 files.add(f)
389 files.add(f)
390
390
391 toupload = toupload.union(set([ctx[f].data().strip() for f\
391 toupload = toupload.union(set([ctx[f].data().strip() for f\
392 in files if lfutil.isstandin(f) and f in ctx]))
392 in files if lfutil.isstandin(f) and f in ctx]))
393 lfcommands.uploadlfiles(ui, self, remote, toupload)
393 lfcommands.uploadlfiles(ui, self, remote, toupload)
394 return super(lfiles_repo, self).push(remote, force, revs,
394 return super(lfiles_repo, self).push(remote, force, revs,
395 newbranch)
395 newbranch)
396
396
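Stripped of the manifest and merge-parent bookkeeping, the push override boils down to this: for every outgoing changeset, read the contents of each changed standin (each standin holds the hash of its largefile) and queue those hashes for uploadlfiles before the normal push runs. A rough sketch of that core step, written against hypothetical inputs rather than real changectx objects:

    # hypothetical input shape: one {path: file data} dict per outgoing
    # changeset; is_standin mimics lfutil.isstandin()
    def standins_to_upload(outgoing_filedata,
                           is_standin=lambda f: f.startswith('.hglf/')):
        """Collect the largefile hashes (standin contents) to push first."""
        toupload = set()
        for files in outgoing_filedata:
            for path, data in files.items():
                if is_standin(path):
                    toupload.add(data.strip())
        return toupload

    # usage
    csets = [{'.hglf/big.bin': 'deadbeefdeadbeef\n', 'README': 'docs\n'},
             {'.hglf/huge.iso': 'cafebabecafebabe\n'}]
    print(sorted(standins_to_upload(csets)))
    # ['cafebabecafebabe', 'deadbeefdeadbeef']
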
397 repo.__class__ = lfiles_repo
397 repo.__class__ = lfiles_repo
398
398
399 def checkrequireslfiles(ui, repo, **kwargs):
399 def checkrequireslfiles(ui, repo, **kwargs):
400 if 'largefiles' not in repo.requirements and lfutil.any_(
400 if 'largefiles' not in repo.requirements and lfutil.any_(
401 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
401 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
402 # workaround bug in Mercurial 1.9 whereby requirements is
402 # workaround bug in Mercurial 1.9 whereby requirements is
403 # a list on newly-cloned repos
403 # a list on newly-cloned repos
404 repo.requirements = set(repo.requirements)
404 repo.requirements = set(repo.requirements)
405
405
406 repo.requirements |= set(['largefiles'])
406 repo.requirements |= set(['largefiles'])
407 repo._writerequirements()
407 repo._writerequirements()
408
408
409 checkrequireslfiles(ui, repo)
409 checkrequireslfiles(ui, repo)
410
410
411 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
411 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
412 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
412 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
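The checkrequireslfiles hook above is registered for both changegroup and commit, so the 'largefiles' requirement is written out as soon as any standin reaches the store and pre-largefiles clients refuse the repository cleanly instead of mishandling it. A minimal sketch of the same check, using assumed helper names rather than the real repo and store objects:

    # hypothetical helper, not the extension's code
    def ensure_largefiles_requirement(requirements, storefiles, shortname='.hglf'):
        """Return an updated requirements set."""
        # normalize to a set -- same spirit as the Mercurial 1.9 workaround
        # above, where requirements may arrive as a list on fresh clones
        reqs = set(requirements)
        if 'largefiles' not in reqs and any(shortname + '/' in f
                                            for f in storefiles):
            reqs.add('largefiles')
        return reqs

    # usage
    print(ensure_largefiles_requirement(
        ['revlogv1', 'store'],
        ['data/.hglf/big.bin.i', 'data/a.txt.i']))
    # e.g. {'store', 'revlogv1', 'largefiles'} (set order may vary)
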
@@ -1,138 +1,138 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles extension: uisetup'''
9 '''setup for largefiles extension: uisetup'''
10
10
11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
12 httprepo, localrepo, sshrepo, sshserver, util, wireproto
12 httprepo, localrepo, sshrepo, sshserver, util, wireproto
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial.hgweb import hgweb_mod, protocol
14 from mercurial.hgweb import hgweb_mod, protocol
15
15
16 import overrides
16 import overrides
17 import proto
17 import proto
18
18
19 def uisetup(ui):
19 def uisetup(ui):
20 # Disable auto-status for some commands which assume that all
20 # Disable auto-status for some commands which assume that all
21 # files in the result are under Mercurial's control
21 # files in the result are under Mercurial's control
22
22
23 entry = extensions.wrapcommand(commands.table, 'add',
23 entry = extensions.wrapcommand(commands.table, 'add',
24 overrides.override_add)
24 overrides.override_add)
25 addopt = [('', 'large', None, _('add as largefile')),
25 addopt = [('', 'large', None, _('add as largefile')),
26 ('', 'lfsize', '', _('add all files above this size (in megabytes)'
26 ('', 'lfsize', '', _('add all files above this size (in megabytes)'
27 ' as largefiles (default: 10)'))]
27 ' as largefiles (default: 10)'))]
28 entry[1].extend(addopt)
28 entry[1].extend(addopt)
29
29
30 entry = extensions.wrapcommand(commands.table, 'addremove',
30 entry = extensions.wrapcommand(commands.table, 'addremove',
31 overrides.override_addremove)
31 overrides.override_addremove)
32 entry = extensions.wrapcommand(commands.table, 'remove',
32 entry = extensions.wrapcommand(commands.table, 'remove',
33 overrides.override_remove)
33 overrides.override_remove)
34 entry = extensions.wrapcommand(commands.table, 'forget',
34 entry = extensions.wrapcommand(commands.table, 'forget',
35 overrides.override_forget)
35 overrides.override_forget)
36 entry = extensions.wrapcommand(commands.table, 'status',
36 entry = extensions.wrapcommand(commands.table, 'status',
37 overrides.override_status)
37 overrides.override_status)
38 entry = extensions.wrapcommand(commands.table, 'log',
38 entry = extensions.wrapcommand(commands.table, 'log',
39 overrides.override_log)
39 overrides.override_log)
40 entry = extensions.wrapcommand(commands.table, 'rollback',
40 entry = extensions.wrapcommand(commands.table, 'rollback',
41 overrides.override_rollback)
41 overrides.override_rollback)
42 entry = extensions.wrapcommand(commands.table, 'verify',
42 entry = extensions.wrapcommand(commands.table, 'verify',
43 overrides.override_verify)
43 overrides.override_verify)
44
44
45 verifyopt = [('', 'large', None, _('verify largefiles')),
45 verifyopt = [('', 'large', None, _('verify largefiles')),
46 ('', 'lfa', None,
46 ('', 'lfa', None,
47 _('verify all revisions of largefiles not just current')),
47 _('verify all revisions of largefiles not just current')),
48 ('', 'lfc', None,
48 ('', 'lfc', None,
49 _('verify largefile contents not just existence'))]
49 _('verify largefile contents not just existence'))]
50 entry[1].extend(verifyopt)
50 entry[1].extend(verifyopt)
51
51
52 entry = extensions.wrapcommand(commands.table, 'outgoing',
52 entry = extensions.wrapcommand(commands.table, 'outgoing',
53 overrides.override_outgoing)
53 overrides.override_outgoing)
54 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
54 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
55 entry[1].extend(outgoingopt)
55 entry[1].extend(outgoingopt)
56 entry = extensions.wrapcommand(commands.table, 'summary',
56 entry = extensions.wrapcommand(commands.table, 'summary',
57 overrides.override_summary)
57 overrides.override_summary)
58 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
58 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
59 entry[1].extend(summaryopt)
59 entry[1].extend(summaryopt)
60
60
61 entry = extensions.wrapcommand(commands.table, 'update',
61 entry = extensions.wrapcommand(commands.table, 'update',
62 overrides.override_update)
62 overrides.override_update)
63 entry = extensions.wrapcommand(commands.table, 'pull',
63 entry = extensions.wrapcommand(commands.table, 'pull',
64 overrides.override_pull)
64 overrides.override_pull)
65 entry = extensions.wrapfunction(filemerge, 'filemerge',
65 entry = extensions.wrapfunction(filemerge, 'filemerge',
66 overrides.override_filemerge)
66 overrides.override_filemerge)
67 entry = extensions.wrapfunction(cmdutil, 'copy',
67 entry = extensions.wrapfunction(cmdutil, 'copy',
68 overrides.override_copy)
68 overrides.override_copy)
69
69
70 # Backout calls revert so we need to override both the command and the
70 # Backout calls revert so we need to override both the command and the
71 # function
71 # function
72 entry = extensions.wrapcommand(commands.table, 'revert',
72 entry = extensions.wrapcommand(commands.table, 'revert',
73 overrides.override_revert)
73 overrides.override_revert)
74 entry = extensions.wrapfunction(commands, 'revert',
74 entry = extensions.wrapfunction(commands, 'revert',
75 overrides.override_revert)
75 overrides.override_revert)
76
76
77 # clone uses hg._update instead of hg.update even though they are the
77 # clone uses hg._update instead of hg.update even though they are the
78 # same function... so wrap both of them
78 # same function... so wrap both of them
79 extensions.wrapfunction(hg, 'update', overrides.hg_update)
79 extensions.wrapfunction(hg, 'update', overrides.hg_update)
80 extensions.wrapfunction(hg, '_update', overrides.hg_update)
80 extensions.wrapfunction(hg, '_update', overrides.hg_update)
81 extensions.wrapfunction(hg, 'clean', overrides.hg_clean)
81 extensions.wrapfunction(hg, 'clean', overrides.hg_clean)
82 extensions.wrapfunction(hg, 'merge', overrides.hg_merge)
82 extensions.wrapfunction(hg, 'merge', overrides.hg_merge)
83
83
84 extensions.wrapfunction(archival, 'archive', overrides.override_archive)
84 extensions.wrapfunction(archival, 'archive', overrides.override_archive)
85 if util.safehasattr(cmdutil, 'bailifchanged'):
85 if util.safehasattr(cmdutil, 'bailifchanged'):
86 extensions.wrapfunction(cmdutil, 'bailifchanged',
86 extensions.wrapfunction(cmdutil, 'bailifchanged',
87 overrides.override_bailifchanged)
87 overrides.override_bailifchanged)
88 else:
88 else:
89 extensions.wrapfunction(cmdutil, 'bail_if_changed',
89 extensions.wrapfunction(cmdutil, 'bail_if_changed',
90 overrides.override_bailifchanged)
90 overrides.override_bailifchanged)
91
91
92 # create the new wireproto commands ...
92 # create the new wireproto commands ...
93 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
93 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
94 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
94 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
95 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
95 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
96
96
97 # ... and wrap some existing ones
97 # ... and wrap some existing ones
98 wireproto.commands['capabilities'] = (proto.capabilities, '')
98 wireproto.commands['capabilities'] = (proto.capabilities, '')
99 wireproto.commands['heads'] = (proto.heads, '')
99 wireproto.commands['heads'] = (proto.heads, '')
100 wireproto.commands['lheads'] = (wireproto.heads, '')
100 wireproto.commands['lheads'] = (wireproto.heads, '')
101
101
102 # make putlfile behave the same as push and {get,stat}lfile behave the same
102 # make putlfile behave the same as push and {get,stat}lfile behave
103 # as pull w.r.t. permissions checks
103 # the same as pull w.r.t. permissions checks
104 hgweb_mod.perms['putlfile'] = 'push'
104 hgweb_mod.perms['putlfile'] = 'push'
105 hgweb_mod.perms['getlfile'] = 'pull'
105 hgweb_mod.perms['getlfile'] = 'pull'
106 hgweb_mod.perms['statlfile'] = 'pull'
106 hgweb_mod.perms['statlfile'] = 'pull'
107
107
108 # the hello wireproto command uses wireproto.capabilities, so it won't see
108 # the hello wireproto command uses wireproto.capabilities, so it won't see
109 # our largefiles capability unless we replace the actual function as well.
109 # our largefiles capability unless we replace the actual function as well.
110 proto.capabilities_orig = wireproto.capabilities
110 proto.capabilities_orig = wireproto.capabilities
111 wireproto.capabilities = proto.capabilities
111 wireproto.capabilities = proto.capabilities
112
112
113 # these let us reject non-largefiles clients and make them display
113 # these let us reject non-largefiles clients and make them display
114 # our error messages
114 # our error messages
115 protocol.webproto.refuseclient = proto.webproto_refuseclient
115 protocol.webproto.refuseclient = proto.webproto_refuseclient
116 sshserver.sshserver.refuseclient = proto.sshproto_refuseclient
116 sshserver.sshserver.refuseclient = proto.sshproto_refuseclient
117
117
118 # can't do this in reposetup because it needs to have happened before
118 # can't do this in reposetup because it needs to have happened before
119 # wirerepo.__init__ is called
119 # wirerepo.__init__ is called
120 proto.ssh_oldcallstream = sshrepo.sshrepository._callstream
120 proto.ssh_oldcallstream = sshrepo.sshrepository._callstream
121 proto.http_oldcallstream = httprepo.httprepository._callstream
121 proto.http_oldcallstream = httprepo.httprepository._callstream
122 sshrepo.sshrepository._callstream = proto.sshrepo_callstream
122 sshrepo.sshrepository._callstream = proto.sshrepo_callstream
123 httprepo.httprepository._callstream = proto.httprepo_callstream
123 httprepo.httprepository._callstream = proto.httprepo_callstream
124
124
125 # don't die on seeing a repo with the largefiles requirement
125 # don't die on seeing a repo with the largefiles requirement
126 localrepo.localrepository.supported |= set(['largefiles'])
126 localrepo.localrepository.supported |= set(['largefiles'])
127
127
128 # override some extensions' stuff as well
128 # override some extensions' stuff as well
129 for name, module in extensions.extensions():
129 for name, module in extensions.extensions():
130 if name == 'fetch':
130 if name == 'fetch':
131 extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
131 extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
132 overrides.override_fetch)
132 overrides.override_fetch)
133 if name == 'purge':
133 if name == 'purge':
134 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
134 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
135 overrides.override_purge)
135 overrides.override_purge)
136 if name == 'rebase':
136 if name == 'rebase':
137 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
137 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
138 overrides.override_rebase)
138 overrides.override_rebase)
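Most of uisetup is one pattern repeated: install an override in the command table that receives the original command as its first argument, adds largefiles-aware behaviour, and otherwise delegates. A generic sketch of that wrapping pattern, with stand-in names rather than Mercurial's extensions.wrapcommand:

    # generic stand-ins, not Mercurial's API: the wrapper is installed in the
    # table and gets the original callable first, so it can delegate
    def wrapcommand(table, name, wrapper):
        orig = table[name]
        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)
        table[name] = wrapped
        return wrapped

    def base_add(ui, *pats, **opts):
        ui.append(('add', pats))
        return 0

    def override_add(orig, ui, *pats, **opts):
        # hypothetical override: divert --large adds, otherwise fall through
        if opts.get('large'):
            ui.append(('add-as-largefile', pats))
            return 0
        return orig(ui, *pats, **opts)

    # usage
    table = {'add': base_add}
    ui = []                       # stands in for the real ui object
    wrapcommand(table, 'add', override_add)
    table['add'](ui, 'big.bin', large=True)
    table['add'](ui, 'small.txt')
    print(ui)  # [('add-as-largefile', ('big.bin',)), ('add', ('small.txt',))]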