##// END OF EJS Templates
largefiles: factor out lfutil.getminsize()
Greg Ward -
r15227:a7686abf default
parent child Browse files
Show More
@@ -1,483 +1,473 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command functions: lfadd() et. al, plus the cmdtable.'''
9 '''High-level command functions: lfadd() et. al, plus the cmdtable.'''
10
10
11 import os
11 import os
12 import shutil
12 import shutil
13
13
14 from mercurial import util, match as match_, hg, node, context, error
14 from mercurial import util, match as match_, hg, node, context, error
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 import lfutil
17 import lfutil
18 import basestore
18 import basestore
19
19
20 # -- Commands ----------------------------------------------------------
20 # -- Commands ----------------------------------------------------------
21
21
22 def lfconvert(ui, src, dest, *pats, **opts):
22 def lfconvert(ui, src, dest, *pats, **opts):
23 '''Convert a normal repository to a largefiles repository
23 '''Convert a normal repository to a largefiles repository
24
24
25 Convert source repository creating an identical repository, except that all
25 Convert source repository creating an identical repository, except that all
26 files that match the patterns given, or are over the given size will be
26 files that match the patterns given, or are over the given size will be
27 added as largefiles. The size used to determine whether or not to track a
27 added as largefiles. The size used to determine whether or not to track a
28 file as a largefile is the size of the first version of the file. After
28 file as a largefile is the size of the first version of the file. After
29 running this command you will need to make sure that largefiles is enabled
29 running this command you will need to make sure that largefiles is enabled
30 anywhere you intend to push the new repository.'''
30 anywhere you intend to push the new repository.'''
31
31
32 if opts['tonormal']:
32 if opts['tonormal']:
33 tolfile = False
33 tolfile = False
34 else:
34 else:
35 tolfile = True
35 tolfile = True
36 size = opts['size']
36 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
37 if not size:
38 size = ui.config(lfutil.longname, 'size', default=None)
39 try:
40 size = int(size)
41 except ValueError:
42 raise util.Abort(_('largefiles.size must be integer, was %s\n')
43 % size)
44 except TypeError:
45 raise util.Abort(_('size must be specified'))
46
47 try:
37 try:
48 rsrc = hg.repository(ui, src)
38 rsrc = hg.repository(ui, src)
49 if not rsrc.local():
39 if not rsrc.local():
50 raise util.Abort(_('%s is not a local Mercurial repo') % src)
40 raise util.Abort(_('%s is not a local Mercurial repo') % src)
51 except error.RepoError, err:
41 except error.RepoError, err:
52 ui.traceback()
42 ui.traceback()
53 raise util.Abort(err.args[0])
43 raise util.Abort(err.args[0])
54 if os.path.exists(dest):
44 if os.path.exists(dest):
55 if not os.path.isdir(dest):
45 if not os.path.isdir(dest):
56 raise util.Abort(_('destination %s already exists') % dest)
46 raise util.Abort(_('destination %s already exists') % dest)
57 elif os.listdir(dest):
47 elif os.listdir(dest):
58 raise util.Abort(_('destination %s is not empty') % dest)
48 raise util.Abort(_('destination %s is not empty') % dest)
59 try:
49 try:
60 ui.status(_('initializing destination %s\n') % dest)
50 ui.status(_('initializing destination %s\n') % dest)
61 rdst = hg.repository(ui, dest, create=True)
51 rdst = hg.repository(ui, dest, create=True)
62 if not rdst.local():
52 if not rdst.local():
63 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
53 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
64 except error.RepoError:
54 except error.RepoError:
65 ui.traceback()
55 ui.traceback()
66 raise util.Abort(_('%s is not a repo') % dest)
56 raise util.Abort(_('%s is not a repo') % dest)
67
57
68 success = False
58 success = False
69 try:
59 try:
70 # Lock destination to prevent modification while it is converted to.
60 # Lock destination to prevent modification while it is converted to.
71 # Don't need to lock src because we are just reading from its history
61 # Don't need to lock src because we are just reading from its history
72 # which can't change.
62 # which can't change.
73 dst_lock = rdst.lock()
63 dst_lock = rdst.lock()
74
64
75 # Get a list of all changesets in the source. The easy way to do this
65 # Get a list of all changesets in the source. The easy way to do this
76 # is to simply walk the changelog, using changelog.nodesbewteen().
66 # is to simply walk the changelog, using changelog.nodesbewteen().
77 # Take a look at mercurial/revlog.py:639 for more details.
67 # Take a look at mercurial/revlog.py:639 for more details.
78 # Use a generator instead of a list to decrease memory usage
68 # Use a generator instead of a list to decrease memory usage
79 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
69 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
80 rsrc.heads())[0])
70 rsrc.heads())[0])
81 revmap = {node.nullid: node.nullid}
71 revmap = {node.nullid: node.nullid}
82 if tolfile:
72 if tolfile:
83 lfiles = set()
73 lfiles = set()
84 normalfiles = set()
74 normalfiles = set()
85 if not pats:
75 if not pats:
86 pats = ui.config(lfutil.longname, 'patterns', default=())
76 pats = ui.config(lfutil.longname, 'patterns', default=())
87 if pats:
77 if pats:
88 pats = pats.split(' ')
78 pats = pats.split(' ')
89 if pats:
79 if pats:
90 matcher = match_.match(rsrc.root, '', list(pats))
80 matcher = match_.match(rsrc.root, '', list(pats))
91 else:
81 else:
92 matcher = None
82 matcher = None
93
83
94 lfiletohash = {}
84 lfiletohash = {}
95 for ctx in ctxs:
85 for ctx in ctxs:
96 ui.progress(_('converting revisions'), ctx.rev(),
86 ui.progress(_('converting revisions'), ctx.rev(),
97 unit=_('revision'), total=rsrc['tip'].rev())
87 unit=_('revision'), total=rsrc['tip'].rev())
98 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
88 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
99 lfiles, normalfiles, matcher, size, lfiletohash)
89 lfiles, normalfiles, matcher, size, lfiletohash)
100 ui.progress(_('converting revisions'), None)
90 ui.progress(_('converting revisions'), None)
101
91
102 if os.path.exists(rdst.wjoin(lfutil.shortname)):
92 if os.path.exists(rdst.wjoin(lfutil.shortname)):
103 shutil.rmtree(rdst.wjoin(lfutil.shortname))
93 shutil.rmtree(rdst.wjoin(lfutil.shortname))
104
94
105 for f in lfiletohash.keys():
95 for f in lfiletohash.keys():
106 if os.path.isfile(rdst.wjoin(f)):
96 if os.path.isfile(rdst.wjoin(f)):
107 os.unlink(rdst.wjoin(f))
97 os.unlink(rdst.wjoin(f))
108 try:
98 try:
109 os.removedirs(os.path.dirname(rdst.wjoin(f)))
99 os.removedirs(os.path.dirname(rdst.wjoin(f)))
110 except OSError:
100 except OSError:
111 pass
101 pass
112
102
113 else:
103 else:
114 for ctx in ctxs:
104 for ctx in ctxs:
115 ui.progress(_('converting revisions'), ctx.rev(),
105 ui.progress(_('converting revisions'), ctx.rev(),
116 unit=_('revision'), total=rsrc['tip'].rev())
106 unit=_('revision'), total=rsrc['tip'].rev())
117 _addchangeset(ui, rsrc, rdst, ctx, revmap)
107 _addchangeset(ui, rsrc, rdst, ctx, revmap)
118
108
119 ui.progress(_('converting revisions'), None)
109 ui.progress(_('converting revisions'), None)
120 success = True
110 success = True
121 finally:
111 finally:
122 if not success:
112 if not success:
123 # we failed, remove the new directory
113 # we failed, remove the new directory
124 shutil.rmtree(rdst.root)
114 shutil.rmtree(rdst.root)
125 dst_lock.release()
115 dst_lock.release()
126
116
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    '''Re-commit *ctx* from the source repo into *rdst*, replacing each
    standin with the real largefile contents; records the resulting node
    in *revmap*.'''
    # Convert src parents to dst parents
    parents = [revmap[p.node()] for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        # files present in a parent manifest but gone from this one
        files |= (set(mp1) | set(mp2)) - set(mc)
        # files whose content differs from either parent
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            # the standin's content is the hash of the real file
            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                # rewrite each tagged node to its converted counterpart
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                        name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                'x' in fctx.flags(), renamed)

    # destination names: strip the standin prefix where present
    dstfiles = [lfutil.splitstandin(name) if lfutil.isstandin(name) else name
                for name in files]
    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
        getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
202
192
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Commit *ctx* into *rdst*, converting files that qualify (by
    *matcher* pattern or minimum *size* in megabytes) into largefile
    standins.

    lfiles/normalfiles accumulate the classification across changesets;
    lfiletohash maps each largefile to the last hash written; revmap is
    updated with the node committed for *ctx*.'''
    # Convert src parents to dst parents
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the lfileness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise util.Abort(
                            _('Renamed/copied largefile %s becomes symlink')
                            % f)
                    # symlinks are never tracked as largefiles
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    # BUGFIX: recompute the rename source here. The
                    # 'renamed' local from the classification block above
                    # may be unbound (file classified in an earlier
                    # changeset) or stale from a previous loop iteration.
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise util.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                fullpath = rdst.wjoin(f)
                lfutil.createdir(os.path.dirname(fullpath))
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    # BUGFIX: initialize fd so the finally clause cannot
                    # raise NameError (masking the real error) when open()
                    # itself fails.
                    fd = None
                    try:
                        fd = open(fullpath, 'wb')
                        fd.write(ctx[f].data())
                    finally:
                        if fd:
                            fd.close()
                    executable = 'x' in ctx[f].flags()
                    os.chmod(fullpath, lfutil.getmode(executable))
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            # the standin file's content is just the hash of the largefile
            return context.memfilectx(f, lfiletohash[srcfname], 'l' in
                fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]

            data = fctx.data()
            if f == '.hgtags':
                # rewrite each tagged node to its converted counterpart
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                        name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                'x' in fctx.flags(), renamed)

    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
        getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
318
308
319 def _islfile(file, ctx, matcher, size):
309 def _islfile(file, ctx, matcher, size):
320 '''
310 '''
321 A file is a lfile if it matches a pattern or is over
311 A file is a lfile if it matches a pattern or is over
322 the given size.
312 the given size.
323 '''
313 '''
324 # Never store hgtags or hgignore as lfiles
314 # Never store hgtags or hgignore as lfiles
325 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
315 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
326 return False
316 return False
327 if matcher and matcher(file):
317 if matcher and matcher(file):
328 return True
318 return True
329 try:
319 try:
330 return ctx.filectx(file).size() >= size * 1024 * 1024
320 return ctx.filectx(file).size() >= size * 1024 * 1024
331 except error.LookupError:
321 except error.LookupError:
332 return False
322 return False
333
323
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    # Don't upload locally. All largefiles are in the system wide cache
    # so the other repo can just get them from there.
    if not files or rdst.local():
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    # skip hashes the store already holds
    missing = [h for h in files if not store.exists(h)]
    total = len(missing)
    for at, hash in enumerate(missing):
        ui.progress(_('uploading largefiles'), at, unit='largefile',
            total=total)
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise util.Abort(_('Missing largefile %s needs to be uploaded')
                % hash)
        # XXX check for errors here
        store.put(source, hash)
    ui.progress(_('uploading largefiles'), None)
357
347
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every big file revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each big file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    revs = ['.']
    if all:
        # Pass a list rather than an iterator: store.verify is known to
        # accept a list.
        revs = range(len(repo))

    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)
373
363
def cachelfiles(ui, repo, node):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    toget = []
    for lfile in lfutil.listlfiles(repo, node):
        # the standin's content is the expected hash of the largefile
        expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        abslfile = repo.wjoin(lfile)
        # if it exists and its hash matches, it might have been locally
        # modified before updating and the user chose 'local'. in this case,
        # it will not be in any store, so don't look for it.
        if ((not os.path.exists(abslfile)
             or expectedhash != lfutil.hashfile(abslfile))
                and not lfutil.findfile(repo, expectedhash)):
            toget.append((lfile, expectedhash))

    if not toget:
        return ([], [])
    store = basestore._openstore(repo)
    return store.get(toget)
400
390
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    '''Sync working-copy largefiles with their standins; if *filelist* is
    given, restrict the operation to those files.'''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        printed = False
        if printmessage and lfiles:
            ui.status(_('getting changed largefiles\n'))
            printed = True
            cachelfiles(ui, repo, '.')

        updated, removed = 0, 0
        # run all updates first (matches the original eager map()), then
        # tally; increment the appropriate counter according to
        # _updatelfile's return value
        results = [_updatelfile(repo, lfdirstate, f) for f in lfiles]
        for i in results:
            if i:   # i may be None (cache miss) or 0 (unchanged): no-op
                if i > 0:
                    updated += i
                else:
                    removed -= i
            if printmessage and (removed or updated) and not printed:
                ui.status(_('getting changed largefiles\n'))
                printed = True

        lfdirstate.write()
        if printed and printmessage:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
432
422
def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        # mirror a merge backup of the standin onto the largefile
        if os.path.exists(absstandin + '.orig'):
            shutil.copyfile(abslfile, abslfile + '.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        if expecthash != '' and \
                (not os.path.exists(abslfile) or
                 expecthash != lfutil.hashfile(abslfile)):
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                return None # don't try to set the mode or update the dirstate
            ret = 1
        # keep the largefile's mode in sync with its standin
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    elif os.path.exists(abslfile):
        # standin is gone: the largefile was removed
        os.unlink(abslfile)
        ret = -1

    # copy the standin's dirstate status over to the lfdirstate
    state = repo.dirstate[lfutil.standin(lfile)]
    sync = {'n': lfdirstate.normal,
            'r': lfdirstate.remove,
            'a': lfdirstate.add,
            '?': lfdirstate.drop}.get(state)
    if sync is not None:
        sync(lfile)
    return ret
471
461
472 # -- hg commands declarations ------------------------------------------------
462 # -- hg commands declarations ------------------------------------------------
473
463
474
464
# Maps command name -> (function, options, synopsis) for command registration.
cmdtable = {
    'lfconvert': (
        lfconvert,
        [('s', 'size', 0,
          'All files over this size (in megabytes) '
          'will be considered largefiles. This can also be specified '
          'in your hgrc as [largefiles].size.'),
         ('', 'tonormal', False,
          'Convert from a largefiles repo to a normal repo')],
        _('hg lfconvert SOURCE DEST [FILE ...]')),
    }
@@ -1,431 +1,445 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import shutil
13 import shutil
14 import stat
14 import stat
15 import hashlib
15 import hashlib
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 longname = 'largefiles'
21 longname = 'largefiles'
22
22
23
23
24 # -- Portability wrappers ----------------------------------------------
24 # -- Portability wrappers ----------------------------------------------
25
25
26 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
26 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
27 return dirstate.walk(matcher, [], unknown, ignored)
27 return dirstate.walk(matcher, [], unknown, ignored)
28
28
29 def repo_add(repo, list):
29 def repo_add(repo, list):
30 add = repo[None].add
30 add = repo[None].add
31 return add(list)
31 return add(list)
32
32
33 def repo_remove(repo, list, unlink=False):
33 def repo_remove(repo, list, unlink=False):
34 def remove(list, unlink):
34 def remove(list, unlink):
35 wlock = repo.wlock()
35 wlock = repo.wlock()
36 try:
36 try:
37 if unlink:
37 if unlink:
38 for f in list:
38 for f in list:
39 try:
39 try:
40 util.unlinkpath(repo.wjoin(f))
40 util.unlinkpath(repo.wjoin(f))
41 except OSError, inst:
41 except OSError, inst:
42 if inst.errno != errno.ENOENT:
42 if inst.errno != errno.ENOENT:
43 raise
43 raise
44 repo[None].forget(list)
44 repo[None].forget(list)
45 finally:
45 finally:
46 wlock.release()
46 wlock.release()
47 return remove(list, unlink=unlink)
47 return remove(list, unlink=unlink)
48
48
49 def repo_forget(repo, list):
49 def repo_forget(repo, list):
50 forget = repo[None].forget
50 forget = repo[None].forget
51 return forget(list)
51 return forget(list)
52
52
53 def findoutgoing(repo, remote, force):
53 def findoutgoing(repo, remote, force):
54 from mercurial import discovery
54 from mercurial import discovery
55 common, _anyinc, _heads = discovery.findcommonincoming(repo,
55 common, _anyinc, _heads = discovery.findcommonincoming(repo,
56 remote, force=force)
56 remote, force=force)
57 return repo.changelog.findmissing(common)
57 return repo.changelog.findmissing(common)
58
58
59 # -- Private worker functions ------------------------------------------
59 # -- Private worker functions ------------------------------------------
60
60
61 def getminsize(ui, assumelfiles, opt, default=10):
62 lfsize = opt
63 if not lfsize and assumelfiles:
64 lfsize = ui.config(longname, 'size', default=default)
65 if lfsize:
66 try:
67 lfsize = int(lfsize)
68 except ValueError:
69 raise util.Abort(_('largefiles: size must be an integer, was %s\n')
70 % lfsize)
71 if lfsize is None:
72 raise util.Abort(_('minimum size for largefiles must be specified'))
73 return lfsize
74
61 def link(src, dest):
75 def link(src, dest):
62 try:
76 try:
63 util.oslink(src, dest)
77 util.oslink(src, dest)
64 except OSError:
78 except OSError:
65 # If hardlinks fail fall back on copy
79 # If hardlinks fail fall back on copy
66 shutil.copyfile(src, dest)
80 shutil.copyfile(src, dest)
67 os.chmod(dest, os.stat(src).st_mode)
81 os.chmod(dest, os.stat(src).st_mode)
68
82
69 def systemcachepath(ui, hash):
83 def systemcachepath(ui, hash):
70 path = ui.config(longname, 'systemcache', None)
84 path = ui.config(longname, 'systemcache', None)
71 if path:
85 if path:
72 path = os.path.join(path, hash)
86 path = os.path.join(path, hash)
73 else:
87 else:
74 if os.name == 'nt':
88 if os.name == 'nt':
75 path = os.path.join(os.getenv('LOCALAPPDATA') or \
89 path = os.path.join(os.getenv('LOCALAPPDATA') or \
76 os.getenv('APPDATA'), longname, hash)
90 os.getenv('APPDATA'), longname, hash)
77 elif os.name == 'posix':
91 elif os.name == 'posix':
78 path = os.path.join(os.getenv('HOME'), '.' + longname, hash)
92 path = os.path.join(os.getenv('HOME'), '.' + longname, hash)
79 else:
93 else:
80 raise util.Abort(_('Unknown operating system: %s\n') % os.name)
94 raise util.Abort(_('Unknown operating system: %s\n') % os.name)
81 return path
95 return path
82
96
83 def insystemcache(ui, hash):
97 def insystemcache(ui, hash):
84 return os.path.exists(systemcachepath(ui, hash))
98 return os.path.exists(systemcachepath(ui, hash))
85
99
86 def findfile(repo, hash):
100 def findfile(repo, hash):
87 if incache(repo, hash):
101 if incache(repo, hash):
88 repo.ui.note(_('Found %s in cache\n') % hash)
102 repo.ui.note(_('Found %s in cache\n') % hash)
89 return cachepath(repo, hash)
103 return cachepath(repo, hash)
90 if insystemcache(repo.ui, hash):
104 if insystemcache(repo.ui, hash):
91 repo.ui.note(_('Found %s in system cache\n') % hash)
105 repo.ui.note(_('Found %s in system cache\n') % hash)
92 return systemcachepath(repo.ui, hash)
106 return systemcachepath(repo.ui, hash)
93 return None
107 return None
94
108
95 class largefiles_dirstate(dirstate.dirstate):
109 class largefiles_dirstate(dirstate.dirstate):
96 def __getitem__(self, key):
110 def __getitem__(self, key):
97 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
111 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
98 def normal(self, f):
112 def normal(self, f):
99 return super(largefiles_dirstate, self).normal(unixpath(f))
113 return super(largefiles_dirstate, self).normal(unixpath(f))
100 def remove(self, f):
114 def remove(self, f):
101 return super(largefiles_dirstate, self).remove(unixpath(f))
115 return super(largefiles_dirstate, self).remove(unixpath(f))
102 def add(self, f):
116 def add(self, f):
103 return super(largefiles_dirstate, self).add(unixpath(f))
117 return super(largefiles_dirstate, self).add(unixpath(f))
104 def drop(self, f):
118 def drop(self, f):
105 return super(largefiles_dirstate, self).drop(unixpath(f))
119 return super(largefiles_dirstate, self).drop(unixpath(f))
106 def forget(self, f):
120 def forget(self, f):
107 return super(largefiles_dirstate, self).forget(unixpath(f))
121 return super(largefiles_dirstate, self).forget(unixpath(f))
108
122
109 def openlfdirstate(ui, repo):
123 def openlfdirstate(ui, repo):
110 '''
124 '''
111 Return a dirstate object that tracks big files: i.e. its root is the
125 Return a dirstate object that tracks big files: i.e. its root is the
112 repo root, but it is saved in .hg/largefiles/dirstate.
126 repo root, but it is saved in .hg/largefiles/dirstate.
113 '''
127 '''
114 admin = repo.join(longname)
128 admin = repo.join(longname)
115 opener = scmutil.opener(admin)
129 opener = scmutil.opener(admin)
116 if util.safehasattr(repo.dirstate, '_validate'):
130 if util.safehasattr(repo.dirstate, '_validate'):
117 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
131 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
118 repo.dirstate._validate)
132 repo.dirstate._validate)
119 else:
133 else:
120 lfdirstate = largefiles_dirstate(opener, ui, repo.root)
134 lfdirstate = largefiles_dirstate(opener, ui, repo.root)
121
135
122 # If the largefiles dirstate does not exist, populate and create it. This
136 # If the largefiles dirstate does not exist, populate and create it. This
123 # ensures that we create it on the first meaningful largefiles operation in
137 # ensures that we create it on the first meaningful largefiles operation in
124 # a new clone. It also gives us an easy way to forcibly rebuild largefiles
138 # a new clone. It also gives us an easy way to forcibly rebuild largefiles
125 # state:
139 # state:
126 # rm .hg/largefiles/dirstate && hg status
140 # rm .hg/largefiles/dirstate && hg status
127 # Or even, if things are really messed up:
141 # Or even, if things are really messed up:
128 # rm -rf .hg/largefiles && hg status
142 # rm -rf .hg/largefiles && hg status
129 if not os.path.exists(os.path.join(admin, 'dirstate')):
143 if not os.path.exists(os.path.join(admin, 'dirstate')):
130 util.makedirs(admin)
144 util.makedirs(admin)
131 matcher = getstandinmatcher(repo)
145 matcher = getstandinmatcher(repo)
132 for standin in dirstate_walk(repo.dirstate, matcher):
146 for standin in dirstate_walk(repo.dirstate, matcher):
133 lfile = splitstandin(standin)
147 lfile = splitstandin(standin)
134 hash = readstandin(repo, lfile)
148 hash = readstandin(repo, lfile)
135 lfdirstate.normallookup(lfile)
149 lfdirstate.normallookup(lfile)
136 try:
150 try:
137 if hash == hashfile(lfile):
151 if hash == hashfile(lfile):
138 lfdirstate.normal(lfile)
152 lfdirstate.normal(lfile)
139 except IOError, err:
153 except IOError, err:
140 if err.errno != errno.ENOENT:
154 if err.errno != errno.ENOENT:
141 raise
155 raise
142
156
143 lfdirstate.write()
157 lfdirstate.write()
144
158
145 return lfdirstate
159 return lfdirstate
146
160
147 def lfdirstate_status(lfdirstate, repo, rev):
161 def lfdirstate_status(lfdirstate, repo, rev):
148 wlock = repo.wlock()
162 wlock = repo.wlock()
149 try:
163 try:
150 match = match_.always(repo.root, repo.getcwd())
164 match = match_.always(repo.root, repo.getcwd())
151 s = lfdirstate.status(match, [], False, False, False)
165 s = lfdirstate.status(match, [], False, False, False)
152 unsure, modified, added, removed, missing, unknown, ignored, clean = s
166 unsure, modified, added, removed, missing, unknown, ignored, clean = s
153 for lfile in unsure:
167 for lfile in unsure:
154 if repo[rev][standin(lfile)].data().strip() != \
168 if repo[rev][standin(lfile)].data().strip() != \
155 hashfile(repo.wjoin(lfile)):
169 hashfile(repo.wjoin(lfile)):
156 modified.append(lfile)
170 modified.append(lfile)
157 else:
171 else:
158 clean.append(lfile)
172 clean.append(lfile)
159 lfdirstate.normal(lfile)
173 lfdirstate.normal(lfile)
160 lfdirstate.write()
174 lfdirstate.write()
161 finally:
175 finally:
162 wlock.release()
176 wlock.release()
163 return (modified, added, removed, missing, unknown, ignored, clean)
177 return (modified, added, removed, missing, unknown, ignored, clean)
164
178
165 def listlfiles(repo, rev=None, matcher=None):
179 def listlfiles(repo, rev=None, matcher=None):
166 '''list largefiles in the working copy or specified changeset'''
180 '''list largefiles in the working copy or specified changeset'''
167
181
168 if matcher is None:
182 if matcher is None:
169 matcher = getstandinmatcher(repo)
183 matcher = getstandinmatcher(repo)
170
184
171 # ignore unknown files in working directory
185 # ignore unknown files in working directory
172 return [splitstandin(f) for f in repo[rev].walk(matcher) \
186 return [splitstandin(f) for f in repo[rev].walk(matcher) \
173 if rev is not None or repo.dirstate[f] != '?']
187 if rev is not None or repo.dirstate[f] != '?']
174
188
175 def incache(repo, hash):
189 def incache(repo, hash):
176 return os.path.exists(cachepath(repo, hash))
190 return os.path.exists(cachepath(repo, hash))
177
191
178 def createdir(dir):
192 def createdir(dir):
179 if not os.path.exists(dir):
193 if not os.path.exists(dir):
180 os.makedirs(dir)
194 os.makedirs(dir)
181
195
182 def cachepath(repo, hash):
196 def cachepath(repo, hash):
183 return repo.join(os.path.join(longname, hash))
197 return repo.join(os.path.join(longname, hash))
184
198
185 def copyfromcache(repo, hash, filename):
199 def copyfromcache(repo, hash, filename):
186 '''copyfromcache copies the specified largefile from the repo or system
200 '''copyfromcache copies the specified largefile from the repo or system
187 cache to the specified location in the repository. It will not throw an
201 cache to the specified location in the repository. It will not throw an
188 exception on failure, as it is meant to be called only after ensuring that
202 exception on failure, as it is meant to be called only after ensuring that
189 the needed largefile exists in the cache.'''
203 the needed largefile exists in the cache.'''
190 path = findfile(repo, hash)
204 path = findfile(repo, hash)
191 if path is None:
205 if path is None:
192 return False
206 return False
193 util.makedirs(os.path.dirname(repo.wjoin(filename)))
207 util.makedirs(os.path.dirname(repo.wjoin(filename)))
194 shutil.copy(path, repo.wjoin(filename))
208 shutil.copy(path, repo.wjoin(filename))
195 return True
209 return True
196
210
197 def copytocache(repo, rev, file, uploaded=False):
211 def copytocache(repo, rev, file, uploaded=False):
198 hash = readstandin(repo, file)
212 hash = readstandin(repo, file)
199 if incache(repo, hash):
213 if incache(repo, hash):
200 return
214 return
201 copytocacheabsolute(repo, repo.wjoin(file), hash)
215 copytocacheabsolute(repo, repo.wjoin(file), hash)
202
216
203 def copytocacheabsolute(repo, file, hash):
217 def copytocacheabsolute(repo, file, hash):
204 createdir(os.path.dirname(cachepath(repo, hash)))
218 createdir(os.path.dirname(cachepath(repo, hash)))
205 if insystemcache(repo.ui, hash):
219 if insystemcache(repo.ui, hash):
206 link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
220 link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
207 else:
221 else:
208 shutil.copyfile(file, cachepath(repo, hash))
222 shutil.copyfile(file, cachepath(repo, hash))
209 os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
223 os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
210 linktosystemcache(repo, hash)
224 linktosystemcache(repo, hash)
211
225
212 def linktosystemcache(repo, hash):
226 def linktosystemcache(repo, hash):
213 createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
227 createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
214 link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
228 link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
215
229
216 def getstandinmatcher(repo, pats=[], opts={}):
230 def getstandinmatcher(repo, pats=[], opts={}):
217 '''Return a match object that applies pats to the standin directory'''
231 '''Return a match object that applies pats to the standin directory'''
218 standindir = repo.pathto(shortname)
232 standindir = repo.pathto(shortname)
219 if pats:
233 if pats:
220 # patterns supplied: search standin directory relative to current dir
234 # patterns supplied: search standin directory relative to current dir
221 cwd = repo.getcwd()
235 cwd = repo.getcwd()
222 if os.path.isabs(cwd):
236 if os.path.isabs(cwd):
223 # cwd is an absolute path for hg -R <reponame>
237 # cwd is an absolute path for hg -R <reponame>
224 # work relative to the repository root in this case
238 # work relative to the repository root in this case
225 cwd = ''
239 cwd = ''
226 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
240 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
227 elif os.path.isdir(standindir):
241 elif os.path.isdir(standindir):
228 # no patterns: relative to repo root
242 # no patterns: relative to repo root
229 pats = [standindir]
243 pats = [standindir]
230 else:
244 else:
231 # no patterns and no standin dir: return matcher that matches nothing
245 # no patterns and no standin dir: return matcher that matches nothing
232 match = match_.match(repo.root, None, [], exact=True)
246 match = match_.match(repo.root, None, [], exact=True)
233 match.matchfn = lambda f: False
247 match.matchfn = lambda f: False
234 return match
248 return match
235 return getmatcher(repo, pats, opts, showbad=False)
249 return getmatcher(repo, pats, opts, showbad=False)
236
250
237 def getmatcher(repo, pats=[], opts={}, showbad=True):
251 def getmatcher(repo, pats=[], opts={}, showbad=True):
238 '''Wrapper around scmutil.match() that adds showbad: if false, neuter
252 '''Wrapper around scmutil.match() that adds showbad: if false, neuter
239 the match object\'s bad() method so it does not print any warnings
253 the match object\'s bad() method so it does not print any warnings
240 about missing files or directories.'''
254 about missing files or directories.'''
241 match = scmutil.match(repo[None], pats, opts)
255 match = scmutil.match(repo[None], pats, opts)
242
256
243 if not showbad:
257 if not showbad:
244 match.bad = lambda f, msg: None
258 match.bad = lambda f, msg: None
245 return match
259 return match
246
260
247 def composestandinmatcher(repo, rmatcher):
261 def composestandinmatcher(repo, rmatcher):
248 '''Return a matcher that accepts standins corresponding to the files
262 '''Return a matcher that accepts standins corresponding to the files
249 accepted by rmatcher. Pass the list of files in the matcher as the
263 accepted by rmatcher. Pass the list of files in the matcher as the
250 paths specified by the user.'''
264 paths specified by the user.'''
251 smatcher = getstandinmatcher(repo, rmatcher.files())
265 smatcher = getstandinmatcher(repo, rmatcher.files())
252 isstandin = smatcher.matchfn
266 isstandin = smatcher.matchfn
253 def composed_matchfn(f):
267 def composed_matchfn(f):
254 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
268 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
255 smatcher.matchfn = composed_matchfn
269 smatcher.matchfn = composed_matchfn
256
270
257 return smatcher
271 return smatcher
258
272
259 def standin(filename):
273 def standin(filename):
260 '''Return the repo-relative path to the standin for the specified big
274 '''Return the repo-relative path to the standin for the specified big
261 file.'''
275 file.'''
262 # Notes:
276 # Notes:
263 # 1) Most callers want an absolute path, but _create_standin() needs
277 # 1) Most callers want an absolute path, but _create_standin() needs
264 # it repo-relative so lfadd() can pass it to repo_add(). So leave
278 # it repo-relative so lfadd() can pass it to repo_add(). So leave
265 # it up to the caller to use repo.wjoin() to get an absolute path.
279 # it up to the caller to use repo.wjoin() to get an absolute path.
266 # 2) Join with '/' because that's what dirstate always uses, even on
280 # 2) Join with '/' because that's what dirstate always uses, even on
267 # Windows. Change existing separator to '/' first in case we are
281 # Windows. Change existing separator to '/' first in case we are
268 # passed filenames from an external source (like the command line).
282 # passed filenames from an external source (like the command line).
269 return shortname + '/' + filename.replace(os.sep, '/')
283 return shortname + '/' + filename.replace(os.sep, '/')
270
284
271 def isstandin(filename):
285 def isstandin(filename):
272 '''Return true if filename is a big file standin. filename must
286 '''Return true if filename is a big file standin. filename must
273 be in Mercurial\'s internal form (slash-separated).'''
287 be in Mercurial\'s internal form (slash-separated).'''
274 return filename.startswith(shortname + '/')
288 return filename.startswith(shortname + '/')
275
289
276 def splitstandin(filename):
290 def splitstandin(filename):
277 # Split on / because that's what dirstate always uses, even on Windows.
291 # Split on / because that's what dirstate always uses, even on Windows.
278 # Change local separator to / first just in case we are passed filenames
292 # Change local separator to / first just in case we are passed filenames
279 # from an external source (like the command line).
293 # from an external source (like the command line).
280 bits = filename.replace(os.sep, '/').split('/', 1)
294 bits = filename.replace(os.sep, '/').split('/', 1)
281 if len(bits) == 2 and bits[0] == shortname:
295 if len(bits) == 2 and bits[0] == shortname:
282 return bits[1]
296 return bits[1]
283 else:
297 else:
284 return None
298 return None
285
299
286 def updatestandin(repo, standin):
300 def updatestandin(repo, standin):
287 file = repo.wjoin(splitstandin(standin))
301 file = repo.wjoin(splitstandin(standin))
288 if os.path.exists(file):
302 if os.path.exists(file):
289 hash = hashfile(file)
303 hash = hashfile(file)
290 executable = getexecutable(file)
304 executable = getexecutable(file)
291 writestandin(repo, standin, hash, executable)
305 writestandin(repo, standin, hash, executable)
292
306
293 def readstandin(repo, filename, node=None):
307 def readstandin(repo, filename, node=None):
294 '''read hex hash from standin for filename at given node, or working
308 '''read hex hash from standin for filename at given node, or working
295 directory if no node is given'''
309 directory if no node is given'''
296 return repo[node][standin(filename)].data().strip()
310 return repo[node][standin(filename)].data().strip()
297
311
298 def writestandin(repo, standin, hash, executable):
312 def writestandin(repo, standin, hash, executable):
299 '''write hhash to <repo.root>/<standin>'''
313 '''write hhash to <repo.root>/<standin>'''
300 writehash(hash, repo.wjoin(standin), executable)
314 writehash(hash, repo.wjoin(standin), executable)
301
315
302 def copyandhash(instream, outfile):
316 def copyandhash(instream, outfile):
303 '''Read bytes from instream (iterable) and write them to outfile,
317 '''Read bytes from instream (iterable) and write them to outfile,
304 computing the SHA-1 hash of the data along the way. Close outfile
318 computing the SHA-1 hash of the data along the way. Close outfile
305 when done and return the binary hash.'''
319 when done and return the binary hash.'''
306 hasher = util.sha1('')
320 hasher = util.sha1('')
307 for data in instream:
321 for data in instream:
308 hasher.update(data)
322 hasher.update(data)
309 outfile.write(data)
323 outfile.write(data)
310
324
311 # Blecch: closing a file that somebody else opened is rude and
325 # Blecch: closing a file that somebody else opened is rude and
312 # wrong. But it's so darn convenient and practical! After all,
326 # wrong. But it's so darn convenient and practical! After all,
313 # outfile was opened just to copy and hash.
327 # outfile was opened just to copy and hash.
314 outfile.close()
328 outfile.close()
315
329
316 return hasher.digest()
330 return hasher.digest()
317
331
318 def hashrepofile(repo, file):
332 def hashrepofile(repo, file):
319 return hashfile(repo.wjoin(file))
333 return hashfile(repo.wjoin(file))
320
334
321 def hashfile(file):
335 def hashfile(file):
322 if not os.path.exists(file):
336 if not os.path.exists(file):
323 return ''
337 return ''
324 hasher = util.sha1('')
338 hasher = util.sha1('')
325 fd = open(file, 'rb')
339 fd = open(file, 'rb')
326 for data in blockstream(fd):
340 for data in blockstream(fd):
327 hasher.update(data)
341 hasher.update(data)
328 fd.close()
342 fd.close()
329 return hasher.hexdigest()
343 return hasher.hexdigest()
330
344
331 class limitreader(object):
345 class limitreader(object):
332 def __init__(self, f, limit):
346 def __init__(self, f, limit):
333 self.f = f
347 self.f = f
334 self.limit = limit
348 self.limit = limit
335
349
336 def read(self, length):
350 def read(self, length):
337 if self.limit == 0:
351 if self.limit == 0:
338 return ''
352 return ''
339 length = length > self.limit and self.limit or length
353 length = length > self.limit and self.limit or length
340 self.limit -= length
354 self.limit -= length
341 return self.f.read(length)
355 return self.f.read(length)
342
356
343 def close(self):
357 def close(self):
344 pass
358 pass
345
359
346 def blockstream(infile, blocksize=128 * 1024):
360 def blockstream(infile, blocksize=128 * 1024):
347 """Generator that yields blocks of data from infile and closes infile."""
361 """Generator that yields blocks of data from infile and closes infile."""
348 while True:
362 while True:
349 data = infile.read(blocksize)
363 data = infile.read(blocksize)
350 if not data:
364 if not data:
351 break
365 break
352 yield data
366 yield data
353 # Same blecch as above.
367 # Same blecch as above.
354 infile.close()
368 infile.close()
355
369
356 def readhash(filename):
370 def readhash(filename):
357 rfile = open(filename, 'rb')
371 rfile = open(filename, 'rb')
358 hash = rfile.read(40)
372 hash = rfile.read(40)
359 rfile.close()
373 rfile.close()
360 if len(hash) < 40:
374 if len(hash) < 40:
361 raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
375 raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
362 % (filename, len(hash)))
376 % (filename, len(hash)))
363 return hash
377 return hash
364
378
365 def writehash(hash, filename, executable):
379 def writehash(hash, filename, executable):
366 util.makedirs(os.path.dirname(filename))
380 util.makedirs(os.path.dirname(filename))
367 if os.path.exists(filename):
381 if os.path.exists(filename):
368 os.unlink(filename)
382 os.unlink(filename)
369 wfile = open(filename, 'wb')
383 wfile = open(filename, 'wb')
370
384
371 try:
385 try:
372 wfile.write(hash)
386 wfile.write(hash)
373 wfile.write('\n')
387 wfile.write('\n')
374 finally:
388 finally:
375 wfile.close()
389 wfile.close()
376 if os.path.exists(filename):
390 if os.path.exists(filename):
377 os.chmod(filename, getmode(executable))
391 os.chmod(filename, getmode(executable))
378
392
379 def getexecutable(filename):
393 def getexecutable(filename):
380 mode = os.stat(filename).st_mode
394 mode = os.stat(filename).st_mode
381 return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and (mode & \
395 return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and (mode & \
382 stat.S_IXOTH)
396 stat.S_IXOTH)
383
397
384 def getmode(executable):
398 def getmode(executable):
385 if executable:
399 if executable:
386 return 0755
400 return 0755
387 else:
401 else:
388 return 0644
402 return 0644
389
403
390 def urljoin(first, second, *arg):
404 def urljoin(first, second, *arg):
391 def join(left, right):
405 def join(left, right):
392 if not left.endswith('/'):
406 if not left.endswith('/'):
393 left += '/'
407 left += '/'
394 if right.startswith('/'):
408 if right.startswith('/'):
395 right = right[1:]
409 right = right[1:]
396 return left + right
410 return left + right
397
411
398 url = join(first, second)
412 url = join(first, second)
399 for a in arg:
413 for a in arg:
400 url = join(url, a)
414 url = join(url, a)
401 return url
415 return url
402
416
403 def hexsha1(data):
417 def hexsha1(data):
404 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
418 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
405 object data"""
419 object data"""
406 h = hashlib.sha1()
420 h = hashlib.sha1()
407 for chunk in util.filechunkiter(data):
421 for chunk in util.filechunkiter(data):
408 h.update(chunk)
422 h.update(chunk)
409 return h.hexdigest()
423 return h.hexdigest()
410
424
411 def httpsendfile(ui, filename):
425 def httpsendfile(ui, filename):
412 return httpconnection.httpsendfile(ui, filename, 'rb')
426 return httpconnection.httpsendfile(ui, filename, 'rb')
413
427
414 # Convert a path to a unix style path. This is used to give a
428 # Convert a path to a unix style path. This is used to give a
415 # canonical path to the lfdirstate.
429 # canonical path to the lfdirstate.
416 def unixpath(path):
430 def unixpath(path):
417 return os.path.normpath(path).replace(os.sep, '/')
431 return os.path.normpath(path).replace(os.sep, '/')
418
432
419 def islfilesrepo(repo):
433 def islfilesrepo(repo):
420 return ('largefiles' in repo.requirements and
434 return ('largefiles' in repo.requirements and
421 any_(shortname + '/' in f[0] for f in repo.store.datafiles()))
435 any_(shortname + '/' in f[0] for f in repo.store.datafiles()))
422
436
423 def any_(gen):
437 def any_(gen):
424 for x in gen:
438 for x in gen:
425 if x:
439 if x:
426 return True
440 return True
427 return False
441 return False
428
442
429 class storeprotonotcapable(BaseException):
443 class storeprotonotcapable(BaseException):
430 def __init__(self, storetypes):
444 def __init__(self, storetypes):
431 self.storetypes = storetypes
445 self.storetypes = storetypes
@@ -1,830 +1,823 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, match as match_, node, \
14 from mercurial import hg, commands, util, cmdutil, match as match_, node, \
15 archival, error, merge
15 archival, error, merge
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19 import lfutil
19
20
20 try:
21 try:
21 from mercurial import scmutil
22 from mercurial import scmutil
22 except ImportError:
23 except ImportError:
23 pass
24 pass
24
25
25 import lfutil
26 import lfutil
26 import lfcommands
27 import lfcommands
27
28
28 def installnormalfilesmatchfn(manifest):
29 def installnormalfilesmatchfn(manifest):
29 '''overrides scmutil.match so that the matcher it returns will ignore all
30 '''overrides scmutil.match so that the matcher it returns will ignore all
30 largefiles'''
31 largefiles'''
31 oldmatch = None # for the closure
32 oldmatch = None # for the closure
32 def override_match(repo, pats=[], opts={}, globbed=False,
33 def override_match(repo, pats=[], opts={}, globbed=False,
33 default='relpath'):
34 default='relpath'):
34 match = oldmatch(repo, pats, opts, globbed, default)
35 match = oldmatch(repo, pats, opts, globbed, default)
35 m = copy.copy(match)
36 m = copy.copy(match)
36 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
37 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
37 manifest)
38 manifest)
38 m._files = filter(notlfile, m._files)
39 m._files = filter(notlfile, m._files)
39 m._fmap = set(m._files)
40 m._fmap = set(m._files)
40 orig_matchfn = m.matchfn
41 orig_matchfn = m.matchfn
41 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
42 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
42 return m
43 return m
43 oldmatch = installmatchfn(override_match)
44 oldmatch = installmatchfn(override_match)
44
45
45 def installmatchfn(f):
46 def installmatchfn(f):
46 oldmatch = scmutil.match
47 oldmatch = scmutil.match
47 setattr(f, 'oldmatch', oldmatch)
48 setattr(f, 'oldmatch', oldmatch)
48 scmutil.match = f
49 scmutil.match = f
49 return oldmatch
50 return oldmatch
50
51
51 def restorematchfn():
52 def restorematchfn():
52 '''restores scmutil.match to what it was before installnormalfilesmatchfn
53 '''restores scmutil.match to what it was before installnormalfilesmatchfn
53 was called. no-op if scmutil.match is its original function.
54 was called. no-op if scmutil.match is its original function.
54
55
55 Note that n calls to installnormalfilesmatchfn will require n calls to
56 Note that n calls to installnormalfilesmatchfn will require n calls to
56 restore matchfn to reverse'''
57 restore matchfn to reverse'''
57 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
58 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
58
59
59 # -- Wrappers: modify existing commands --------------------------------
60 # -- Wrappers: modify existing commands --------------------------------
60
61
61 # Add works by going through the files that the user wanted to add
62 # Add works by going through the files that the user wanted to add
62 # and checking if they should be added as lfiles. Then making a new
63 # and checking if they should be added as lfiles. Then making a new
63 # matcher which matches only the normal files and running the original
64 # matcher which matches only the normal files and running the original
64 # version of add.
65 # version of add.
65 def override_add(orig, ui, repo, *pats, **opts):
66 def override_add(orig, ui, repo, *pats, **opts):
66 large = opts.pop('large', None)
67 large = opts.pop('large', None)
67
68 lfsize = lfutil.getminsize(
68 lfsize = opts.pop('lfsize', None)
69 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
69 if not lfsize and lfutil.islfilesrepo(repo):
70 lfsize = ui.config(lfutil.longname, 'size', default='10')
71 if lfsize:
72 try:
73 lfsize = int(lfsize)
74 except ValueError:
75 raise util.Abort(_('largefiles: size must be an integer, was %s\n')
76 % lfsize)
77
70
78 lfmatcher = None
71 lfmatcher = None
79 if os.path.exists(repo.wjoin(lfutil.shortname)):
72 if os.path.exists(repo.wjoin(lfutil.shortname)):
80 lfpats = ui.config(lfutil.longname, 'patterns', default=())
73 lfpats = ui.config(lfutil.longname, 'patterns', default=())
81 if lfpats:
74 if lfpats:
82 lfpats = lfpats.split(' ')
75 lfpats = lfpats.split(' ')
83 lfmatcher = match_.match(repo.root, '', list(lfpats))
76 lfmatcher = match_.match(repo.root, '', list(lfpats))
84
77
85 lfnames = []
78 lfnames = []
86 m = scmutil.match(repo[None], pats, opts)
79 m = scmutil.match(repo[None], pats, opts)
87 m.bad = lambda x, y: None
80 m.bad = lambda x, y: None
88 wctx = repo[None]
81 wctx = repo[None]
89 for f in repo.walk(m):
82 for f in repo.walk(m):
90 exact = m.exact(f)
83 exact = m.exact(f)
91 lfile = lfutil.standin(f) in wctx
84 lfile = lfutil.standin(f) in wctx
92 nfile = f in wctx
85 nfile = f in wctx
93 exists = lfile or nfile
86 exists = lfile or nfile
94
87
95 # Don't warn the user when they attempt to add a normal tracked file.
88 # Don't warn the user when they attempt to add a normal tracked file.
96 # The normal add code will do that for us.
89 # The normal add code will do that for us.
97 if exact and exists:
90 if exact and exists:
98 if lfile:
91 if lfile:
99 ui.warn(_('%s already a largefile\n') % f)
92 ui.warn(_('%s already a largefile\n') % f)
100 continue
93 continue
101
94
102 if exact or not exists:
95 if exact or not exists:
103 if large or (lfsize and os.path.getsize(repo.wjoin(f)) >= \
96 if large or (lfsize and os.path.getsize(repo.wjoin(f)) >= \
104 lfsize * 1024 * 1024) or (lfmatcher and lfmatcher(f)):
97 lfsize * 1024 * 1024) or (lfmatcher and lfmatcher(f)):
105 lfnames.append(f)
98 lfnames.append(f)
106 if ui.verbose or not exact:
99 if ui.verbose or not exact:
107 ui.status(_('adding %s as a largefile\n') % m.rel(f))
100 ui.status(_('adding %s as a largefile\n') % m.rel(f))
108
101
109 bad = []
102 bad = []
110 standins = []
103 standins = []
111
104
112 # Need to lock otherwise there could be a race condition inbetween when
105 # Need to lock otherwise there could be a race condition inbetween when
113 # standins are created and added to the repo
106 # standins are created and added to the repo
114 wlock = repo.wlock()
107 wlock = repo.wlock()
115 try:
108 try:
116 if not opts.get('dry_run'):
109 if not opts.get('dry_run'):
117 lfdirstate = lfutil.openlfdirstate(ui, repo)
110 lfdirstate = lfutil.openlfdirstate(ui, repo)
118 for f in lfnames:
111 for f in lfnames:
119 standinname = lfutil.standin(f)
112 standinname = lfutil.standin(f)
120 lfutil.writestandin(repo, standinname, hash='',
113 lfutil.writestandin(repo, standinname, hash='',
121 executable=lfutil.getexecutable(repo.wjoin(f)))
114 executable=lfutil.getexecutable(repo.wjoin(f)))
122 standins.append(standinname)
115 standins.append(standinname)
123 if lfdirstate[f] == 'r':
116 if lfdirstate[f] == 'r':
124 lfdirstate.normallookup(f)
117 lfdirstate.normallookup(f)
125 else:
118 else:
126 lfdirstate.add(f)
119 lfdirstate.add(f)
127 lfdirstate.write()
120 lfdirstate.write()
128 bad += [lfutil.splitstandin(f) for f in lfutil.repo_add(repo,
121 bad += [lfutil.splitstandin(f) for f in lfutil.repo_add(repo,
129 standins) if f in m.files()]
122 standins) if f in m.files()]
130 finally:
123 finally:
131 wlock.release()
124 wlock.release()
132
125
133 installnormalfilesmatchfn(repo[None].manifest())
126 installnormalfilesmatchfn(repo[None].manifest())
134 result = orig(ui, repo, *pats, **opts)
127 result = orig(ui, repo, *pats, **opts)
135 restorematchfn()
128 restorematchfn()
136
129
137 return (result == 1 or bad) and 1 or 0
130 return (result == 1 or bad) and 1 or 0
138
131
139 def override_remove(orig, ui, repo, *pats, **opts):
132 def override_remove(orig, ui, repo, *pats, **opts):
140 manifest = repo[None].manifest()
133 manifest = repo[None].manifest()
141 installnormalfilesmatchfn(manifest)
134 installnormalfilesmatchfn(manifest)
142 orig(ui, repo, *pats, **opts)
135 orig(ui, repo, *pats, **opts)
143 restorematchfn()
136 restorematchfn()
144
137
145 after, force = opts.get('after'), opts.get('force')
138 after, force = opts.get('after'), opts.get('force')
146 if not pats and not after:
139 if not pats and not after:
147 raise util.Abort(_('no files specified'))
140 raise util.Abort(_('no files specified'))
148 m = scmutil.match(repo[None], pats, opts)
141 m = scmutil.match(repo[None], pats, opts)
149 try:
142 try:
150 repo.lfstatus = True
143 repo.lfstatus = True
151 s = repo.status(match=m, clean=True)
144 s = repo.status(match=m, clean=True)
152 finally:
145 finally:
153 repo.lfstatus = False
146 repo.lfstatus = False
154 modified, added, deleted, clean = [[f for f in list if lfutil.standin(f) \
147 modified, added, deleted, clean = [[f for f in list if lfutil.standin(f) \
155 in manifest] for list in [s[0], s[1], s[3], s[6]]]
148 in manifest] for list in [s[0], s[1], s[3], s[6]]]
156
149
157 def warn(files, reason):
150 def warn(files, reason):
158 for f in files:
151 for f in files:
159 ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
152 ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
160 % (m.rel(f), reason))
153 % (m.rel(f), reason))
161
154
162 if force:
155 if force:
163 remove, forget = modified + deleted + clean, added
156 remove, forget = modified + deleted + clean, added
164 elif after:
157 elif after:
165 remove, forget = deleted, []
158 remove, forget = deleted, []
166 warn(modified + added + clean, _('still exists'))
159 warn(modified + added + clean, _('still exists'))
167 else:
160 else:
168 remove, forget = deleted + clean, []
161 remove, forget = deleted + clean, []
169 warn(modified, _('is modified'))
162 warn(modified, _('is modified'))
170 warn(added, _('has been marked for add'))
163 warn(added, _('has been marked for add'))
171
164
172 for f in sorted(remove + forget):
165 for f in sorted(remove + forget):
173 if ui.verbose or not m.exact(f):
166 if ui.verbose or not m.exact(f):
174 ui.status(_('removing %s\n') % m.rel(f))
167 ui.status(_('removing %s\n') % m.rel(f))
175
168
176 # Need to lock because standin files are deleted then removed from the
169 # Need to lock because standin files are deleted then removed from the
177 # repository and we could race inbetween.
170 # repository and we could race inbetween.
178 wlock = repo.wlock()
171 wlock = repo.wlock()
179 try:
172 try:
180 lfdirstate = lfutil.openlfdirstate(ui, repo)
173 lfdirstate = lfutil.openlfdirstate(ui, repo)
181 for f in remove:
174 for f in remove:
182 if not after:
175 if not after:
183 os.unlink(repo.wjoin(f))
176 os.unlink(repo.wjoin(f))
184 currentdir = os.path.split(f)[0]
177 currentdir = os.path.split(f)[0]
185 while currentdir and not os.listdir(repo.wjoin(currentdir)):
178 while currentdir and not os.listdir(repo.wjoin(currentdir)):
186 os.rmdir(repo.wjoin(currentdir))
179 os.rmdir(repo.wjoin(currentdir))
187 currentdir = os.path.split(currentdir)[0]
180 currentdir = os.path.split(currentdir)[0]
188 lfdirstate.remove(f)
181 lfdirstate.remove(f)
189 lfdirstate.write()
182 lfdirstate.write()
190
183
191 forget = [lfutil.standin(f) for f in forget]
184 forget = [lfutil.standin(f) for f in forget]
192 remove = [lfutil.standin(f) for f in remove]
185 remove = [lfutil.standin(f) for f in remove]
193 lfutil.repo_forget(repo, forget)
186 lfutil.repo_forget(repo, forget)
194 lfutil.repo_remove(repo, remove, unlink=True)
187 lfutil.repo_remove(repo, remove, unlink=True)
195 finally:
188 finally:
196 wlock.release()
189 wlock.release()
197
190
198 def override_status(orig, ui, repo, *pats, **opts):
191 def override_status(orig, ui, repo, *pats, **opts):
199 try:
192 try:
200 repo.lfstatus = True
193 repo.lfstatus = True
201 return orig(ui, repo, *pats, **opts)
194 return orig(ui, repo, *pats, **opts)
202 finally:
195 finally:
203 repo.lfstatus = False
196 repo.lfstatus = False
204
197
205 def override_log(orig, ui, repo, *pats, **opts):
198 def override_log(orig, ui, repo, *pats, **opts):
206 try:
199 try:
207 repo.lfstatus = True
200 repo.lfstatus = True
208 orig(ui, repo, *pats, **opts)
201 orig(ui, repo, *pats, **opts)
209 finally:
202 finally:
210 repo.lfstatus = False
203 repo.lfstatus = False
211
204
212 def override_verify(orig, ui, repo, *pats, **opts):
205 def override_verify(orig, ui, repo, *pats, **opts):
213 large = opts.pop('large', False)
206 large = opts.pop('large', False)
214 all = opts.pop('lfa', False)
207 all = opts.pop('lfa', False)
215 contents = opts.pop('lfc', False)
208 contents = opts.pop('lfc', False)
216
209
217 result = orig(ui, repo, *pats, **opts)
210 result = orig(ui, repo, *pats, **opts)
218 if large:
211 if large:
219 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
212 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
220 return result
213 return result
221
214
222 # Override needs to refresh standins so that update's normal merge
215 # Override needs to refresh standins so that update's normal merge
223 # will go through properly. Then the other update hook (overriding repo.update)
216 # will go through properly. Then the other update hook (overriding repo.update)
224 # will get the new files. Filemerge is also overriden so that the merge
217 # will get the new files. Filemerge is also overriden so that the merge
225 # will merge standins correctly.
218 # will merge standins correctly.
226 def override_update(orig, ui, repo, *pats, **opts):
219 def override_update(orig, ui, repo, *pats, **opts):
227 lfdirstate = lfutil.openlfdirstate(ui, repo)
220 lfdirstate = lfutil.openlfdirstate(ui, repo)
228 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
221 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
229 False, False)
222 False, False)
230 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
223 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
231
224
232 # Need to lock between the standins getting updated and their lfiles
225 # Need to lock between the standins getting updated and their lfiles
233 # getting updated
226 # getting updated
234 wlock = repo.wlock()
227 wlock = repo.wlock()
235 try:
228 try:
236 if opts['check']:
229 if opts['check']:
237 mod = len(modified) > 0
230 mod = len(modified) > 0
238 for lfile in unsure:
231 for lfile in unsure:
239 standin = lfutil.standin(lfile)
232 standin = lfutil.standin(lfile)
240 if repo['.'][standin].data().strip() != \
233 if repo['.'][standin].data().strip() != \
241 lfutil.hashfile(repo.wjoin(lfile)):
234 lfutil.hashfile(repo.wjoin(lfile)):
242 mod = True
235 mod = True
243 else:
236 else:
244 lfdirstate.normal(lfile)
237 lfdirstate.normal(lfile)
245 lfdirstate.write()
238 lfdirstate.write()
246 if mod:
239 if mod:
247 raise util.Abort(_('uncommitted local changes'))
240 raise util.Abort(_('uncommitted local changes'))
248 # XXX handle removed differently
241 # XXX handle removed differently
249 if not opts['clean']:
242 if not opts['clean']:
250 for lfile in unsure + modified + added:
243 for lfile in unsure + modified + added:
251 lfutil.updatestandin(repo, lfutil.standin(lfile))
244 lfutil.updatestandin(repo, lfutil.standin(lfile))
252 finally:
245 finally:
253 wlock.release()
246 wlock.release()
254 return orig(ui, repo, *pats, **opts)
247 return orig(ui, repo, *pats, **opts)
255
248
256 # Override filemerge to prompt the user about how they wish to merge lfiles.
249 # Override filemerge to prompt the user about how they wish to merge lfiles.
257 # This will handle identical edits, and copy/rename + edit without prompting
250 # This will handle identical edits, and copy/rename + edit without prompting
258 # the user.
251 # the user.
259 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
252 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
260 # Use better variable names here. Because this is a wrapper we cannot
253 # Use better variable names here. Because this is a wrapper we cannot
261 # change the variable names in the function declaration.
254 # change the variable names in the function declaration.
262 fcdest, fcother, fcancestor = fcd, fco, fca
255 fcdest, fcother, fcancestor = fcd, fco, fca
263 if not lfutil.isstandin(orig):
256 if not lfutil.isstandin(orig):
264 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
257 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
265 else:
258 else:
266 if not fcother.cmp(fcdest): # files identical?
259 if not fcother.cmp(fcdest): # files identical?
267 return None
260 return None
268
261
269 # backwards, use working dir parent as ancestor
262 # backwards, use working dir parent as ancestor
270 if fcancestor == fcother:
263 if fcancestor == fcother:
271 fcancestor = fcdest.parents()[0]
264 fcancestor = fcdest.parents()[0]
272
265
273 if orig != fcother.path():
266 if orig != fcother.path():
274 repo.ui.status(_('merging %s and %s to %s\n')
267 repo.ui.status(_('merging %s and %s to %s\n')
275 % (lfutil.splitstandin(orig),
268 % (lfutil.splitstandin(orig),
276 lfutil.splitstandin(fcother.path()),
269 lfutil.splitstandin(fcother.path()),
277 lfutil.splitstandin(fcdest.path())))
270 lfutil.splitstandin(fcdest.path())))
278 else:
271 else:
279 repo.ui.status(_('merging %s\n')
272 repo.ui.status(_('merging %s\n')
280 % lfutil.splitstandin(fcdest.path()))
273 % lfutil.splitstandin(fcdest.path()))
281
274
282 if fcancestor.path() != fcother.path() and fcother.data() == \
275 if fcancestor.path() != fcother.path() and fcother.data() == \
283 fcancestor.data():
276 fcancestor.data():
284 return 0
277 return 0
285 if fcancestor.path() != fcdest.path() and fcdest.data() == \
278 if fcancestor.path() != fcdest.path() and fcdest.data() == \
286 fcancestor.data():
279 fcancestor.data():
287 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
280 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
288 return 0
281 return 0
289
282
290 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
283 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
291 'keep (l)ocal or take (o)ther?') %
284 'keep (l)ocal or take (o)ther?') %
292 lfutil.splitstandin(orig),
285 lfutil.splitstandin(orig),
293 (_('&Local'), _('&Other')), 0) == 0:
286 (_('&Local'), _('&Other')), 0) == 0:
294 return 0
287 return 0
295 else:
288 else:
296 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
289 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
297 return 0
290 return 0
298
291
299 # Copy first changes the matchers to match standins instead of lfiles.
292 # Copy first changes the matchers to match standins instead of lfiles.
300 # Then it overrides util.copyfile in that function it checks if the destination
293 # Then it overrides util.copyfile in that function it checks if the destination
301 # lfile already exists. It also keeps a list of copied files so that the lfiles
294 # lfile already exists. It also keeps a list of copied files so that the lfiles
302 # can be copied and the dirstate updated.
295 # can be copied and the dirstate updated.
303 def override_copy(orig, ui, repo, pats, opts, rename=False):
296 def override_copy(orig, ui, repo, pats, opts, rename=False):
304 # doesn't remove lfile on rename
297 # doesn't remove lfile on rename
305 if len(pats) < 2:
298 if len(pats) < 2:
306 # this isn't legal, let the original function deal with it
299 # this isn't legal, let the original function deal with it
307 return orig(ui, repo, pats, opts, rename)
300 return orig(ui, repo, pats, opts, rename)
308
301
309 def makestandin(relpath):
302 def makestandin(relpath):
310 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
303 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
311 return os.path.join(os.path.relpath('.', repo.getcwd()),
304 return os.path.join(os.path.relpath('.', repo.getcwd()),
312 lfutil.standin(path))
305 lfutil.standin(path))
313
306
314 fullpats = scmutil.expandpats(pats)
307 fullpats = scmutil.expandpats(pats)
315 dest = fullpats[-1]
308 dest = fullpats[-1]
316
309
317 if os.path.isdir(dest):
310 if os.path.isdir(dest):
318 if not os.path.isdir(makestandin(dest)):
311 if not os.path.isdir(makestandin(dest)):
319 os.makedirs(makestandin(dest))
312 os.makedirs(makestandin(dest))
320 # This could copy both lfiles and normal files in one command, but we don't
313 # This could copy both lfiles and normal files in one command, but we don't
321 # want to do that first replace their matcher to only match normal files
314 # want to do that first replace their matcher to only match normal files
322 # and run it then replace it to just match lfiles and run it again
315 # and run it then replace it to just match lfiles and run it again
323 nonormalfiles = False
316 nonormalfiles = False
324 nolfiles = False
317 nolfiles = False
325 try:
318 try:
326 installnormalfilesmatchfn(repo[None].manifest())
319 installnormalfilesmatchfn(repo[None].manifest())
327 result = orig(ui, repo, pats, opts, rename)
320 result = orig(ui, repo, pats, opts, rename)
328 except util.Abort, e:
321 except util.Abort, e:
329 if str(e) != 'no files to copy':
322 if str(e) != 'no files to copy':
330 raise e
323 raise e
331 else:
324 else:
332 nonormalfiles = True
325 nonormalfiles = True
333 result = 0
326 result = 0
334 finally:
327 finally:
335 restorematchfn()
328 restorematchfn()
336
329
337 # The first rename can cause our current working directory to be removed.
330 # The first rename can cause our current working directory to be removed.
338 # In that case there is nothing left to copy/rename so just quit.
331 # In that case there is nothing left to copy/rename so just quit.
339 try:
332 try:
340 repo.getcwd()
333 repo.getcwd()
341 except OSError:
334 except OSError:
342 return result
335 return result
343
336
344 try:
337 try:
345 # When we call orig below it creates the standins but we don't add them
338 # When we call orig below it creates the standins but we don't add them
346 # to the dir state until later so lock during that time.
339 # to the dir state until later so lock during that time.
347 wlock = repo.wlock()
340 wlock = repo.wlock()
348
341
349 manifest = repo[None].manifest()
342 manifest = repo[None].manifest()
350 oldmatch = None # for the closure
343 oldmatch = None # for the closure
351 def override_match(repo, pats=[], opts={}, globbed=False,
344 def override_match(repo, pats=[], opts={}, globbed=False,
352 default='relpath'):
345 default='relpath'):
353 newpats = []
346 newpats = []
354 # The patterns were previously mangled to add the standin
347 # The patterns were previously mangled to add the standin
355 # directory; we need to remove that now
348 # directory; we need to remove that now
356 for pat in pats:
349 for pat in pats:
357 if match_.patkind(pat) is None and lfutil.shortname in pat:
350 if match_.patkind(pat) is None and lfutil.shortname in pat:
358 newpats.append(pat.replace(lfutil.shortname, ''))
351 newpats.append(pat.replace(lfutil.shortname, ''))
359 else:
352 else:
360 newpats.append(pat)
353 newpats.append(pat)
361 match = oldmatch(repo, newpats, opts, globbed, default)
354 match = oldmatch(repo, newpats, opts, globbed, default)
362 m = copy.copy(match)
355 m = copy.copy(match)
363 lfile = lambda f: lfutil.standin(f) in manifest
356 lfile = lambda f: lfutil.standin(f) in manifest
364 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
357 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
365 m._fmap = set(m._files)
358 m._fmap = set(m._files)
366 orig_matchfn = m.matchfn
359 orig_matchfn = m.matchfn
367 m.matchfn = lambda f: lfutil.isstandin(f) and \
360 m.matchfn = lambda f: lfutil.isstandin(f) and \
368 lfile(lfutil.splitstandin(f)) and \
361 lfile(lfutil.splitstandin(f)) and \
369 orig_matchfn(lfutil.splitstandin(f)) or None
362 orig_matchfn(lfutil.splitstandin(f)) or None
370 return m
363 return m
371 oldmatch = installmatchfn(override_match)
364 oldmatch = installmatchfn(override_match)
372 listpats = []
365 listpats = []
373 for pat in pats:
366 for pat in pats:
374 if match_.patkind(pat) is not None:
367 if match_.patkind(pat) is not None:
375 listpats.append(pat)
368 listpats.append(pat)
376 else:
369 else:
377 listpats.append(makestandin(pat))
370 listpats.append(makestandin(pat))
378
371
379 try:
372 try:
380 origcopyfile = util.copyfile
373 origcopyfile = util.copyfile
381 copiedfiles = []
374 copiedfiles = []
382 def override_copyfile(src, dest):
375 def override_copyfile(src, dest):
383 if lfutil.shortname in src and lfutil.shortname in dest:
376 if lfutil.shortname in src and lfutil.shortname in dest:
384 destlfile = dest.replace(lfutil.shortname, '')
377 destlfile = dest.replace(lfutil.shortname, '')
385 if not opts['force'] and os.path.exists(destlfile):
378 if not opts['force'] and os.path.exists(destlfile):
386 raise IOError('',
379 raise IOError('',
387 _('destination largefile already exists'))
380 _('destination largefile already exists'))
388 copiedfiles.append((src, dest))
381 copiedfiles.append((src, dest))
389 origcopyfile(src, dest)
382 origcopyfile(src, dest)
390
383
391 util.copyfile = override_copyfile
384 util.copyfile = override_copyfile
392 result += orig(ui, repo, listpats, opts, rename)
385 result += orig(ui, repo, listpats, opts, rename)
393 finally:
386 finally:
394 util.copyfile = origcopyfile
387 util.copyfile = origcopyfile
395
388
396 lfdirstate = lfutil.openlfdirstate(ui, repo)
389 lfdirstate = lfutil.openlfdirstate(ui, repo)
397 for (src, dest) in copiedfiles:
390 for (src, dest) in copiedfiles:
398 if lfutil.shortname in src and lfutil.shortname in dest:
391 if lfutil.shortname in src and lfutil.shortname in dest:
399 srclfile = src.replace(lfutil.shortname, '')
392 srclfile = src.replace(lfutil.shortname, '')
400 destlfile = dest.replace(lfutil.shortname, '')
393 destlfile = dest.replace(lfutil.shortname, '')
401 destlfiledir = os.path.dirname(destlfile) or '.'
394 destlfiledir = os.path.dirname(destlfile) or '.'
402 if not os.path.isdir(destlfiledir):
395 if not os.path.isdir(destlfiledir):
403 os.makedirs(destlfiledir)
396 os.makedirs(destlfiledir)
404 if rename:
397 if rename:
405 os.rename(srclfile, destlfile)
398 os.rename(srclfile, destlfile)
406 lfdirstate.remove(os.path.relpath(srclfile,
399 lfdirstate.remove(os.path.relpath(srclfile,
407 repo.root))
400 repo.root))
408 else:
401 else:
409 util.copyfile(srclfile, destlfile)
402 util.copyfile(srclfile, destlfile)
410 lfdirstate.add(os.path.relpath(destlfile,
403 lfdirstate.add(os.path.relpath(destlfile,
411 repo.root))
404 repo.root))
412 lfdirstate.write()
405 lfdirstate.write()
413 except util.Abort, e:
406 except util.Abort, e:
414 if str(e) != 'no files to copy':
407 if str(e) != 'no files to copy':
415 raise e
408 raise e
416 else:
409 else:
417 nolfiles = True
410 nolfiles = True
418 finally:
411 finally:
419 restorematchfn()
412 restorematchfn()
420 wlock.release()
413 wlock.release()
421
414
422 if nolfiles and nonormalfiles:
415 if nolfiles and nonormalfiles:
423 raise util.Abort(_('no files to copy'))
416 raise util.Abort(_('no files to copy'))
424
417
425 return result
418 return result
426
419
427 # When the user calls revert, we have to be careful to not revert any changes
420 # When the user calls revert, we have to be careful to not revert any changes
428 # to other lfiles accidentally. This means we have to keep track of the lfiles
421 # to other lfiles accidentally. This means we have to keep track of the lfiles
429 # that are being reverted so we only pull down the necessary lfiles.
422 # that are being reverted so we only pull down the necessary lfiles.
430 #
423 #
431 # Standins are only updated (to match the hash of lfiles) before commits.
424 # Standins are only updated (to match the hash of lfiles) before commits.
432 # Update the standins then run the original revert (changing the matcher to hit
425 # Update the standins then run the original revert (changing the matcher to hit
433 # standins instead of lfiles). Based on the resulting standins update the
426 # standins instead of lfiles). Based on the resulting standins update the
434 # lfiles. Then return the standins to their proper state
427 # lfiles. Then return the standins to their proper state
435 def override_revert(orig, ui, repo, *pats, **opts):
428 def override_revert(orig, ui, repo, *pats, **opts):
436 # Because we put the standins in a bad state (by updating them) and then
429 # Because we put the standins in a bad state (by updating them) and then
437 # return them to a correct state we need to lock to prevent others from
430 # return them to a correct state we need to lock to prevent others from
438 # changing them in their incorrect state.
431 # changing them in their incorrect state.
439 wlock = repo.wlock()
432 wlock = repo.wlock()
440 try:
433 try:
441 lfdirstate = lfutil.openlfdirstate(ui, repo)
434 lfdirstate = lfutil.openlfdirstate(ui, repo)
442 (modified, added, removed, missing, unknown, ignored, clean) = \
435 (modified, added, removed, missing, unknown, ignored, clean) = \
443 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
436 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
444 for lfile in modified:
437 for lfile in modified:
445 lfutil.updatestandin(repo, lfutil.standin(lfile))
438 lfutil.updatestandin(repo, lfutil.standin(lfile))
446
439
447 try:
440 try:
448 ctx = repo[opts.get('rev')]
441 ctx = repo[opts.get('rev')]
449 oldmatch = None # for the closure
442 oldmatch = None # for the closure
450 def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
443 def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
451 default='relpath'):
444 default='relpath'):
452 if util.safehasattr(ctxorrepo, 'match'):
445 if util.safehasattr(ctxorrepo, 'match'):
453 ctx0 = ctxorrepo
446 ctx0 = ctxorrepo
454 else:
447 else:
455 ctx0 = ctxorrepo[None]
448 ctx0 = ctxorrepo[None]
456 match = oldmatch(ctxorrepo, pats, opts, globbed, default)
449 match = oldmatch(ctxorrepo, pats, opts, globbed, default)
457 m = copy.copy(match)
450 m = copy.copy(match)
458 def tostandin(f):
451 def tostandin(f):
459 if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx:
452 if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx:
460 return lfutil.standin(f)
453 return lfutil.standin(f)
461 elif lfutil.standin(f) in repo[None]:
454 elif lfutil.standin(f) in repo[None]:
462 return None
455 return None
463 return f
456 return f
464 m._files = [tostandin(f) for f in m._files]
457 m._files = [tostandin(f) for f in m._files]
465 m._files = [f for f in m._files if f is not None]
458 m._files = [f for f in m._files if f is not None]
466 m._fmap = set(m._files)
459 m._fmap = set(m._files)
467 orig_matchfn = m.matchfn
460 orig_matchfn = m.matchfn
468 def matchfn(f):
461 def matchfn(f):
469 if lfutil.isstandin(f):
462 if lfutil.isstandin(f):
470 # We need to keep track of what lfiles are being
463 # We need to keep track of what lfiles are being
471 # matched so we know which ones to update later
464 # matched so we know which ones to update later
472 # (otherwise we revert changes to other lfiles
465 # (otherwise we revert changes to other lfiles
473 # accidentally). This is repo specific, so duckpunch
466 # accidentally). This is repo specific, so duckpunch
474 # the repo object to keep the list of lfiles for us
467 # the repo object to keep the list of lfiles for us
475 # later.
468 # later.
476 if orig_matchfn(lfutil.splitstandin(f)) and \
469 if orig_matchfn(lfutil.splitstandin(f)) and \
477 (f in repo[None] or f in ctx):
470 (f in repo[None] or f in ctx):
478 lfileslist = getattr(repo, '_lfilestoupdate', [])
471 lfileslist = getattr(repo, '_lfilestoupdate', [])
479 lfileslist.append(lfutil.splitstandin(f))
472 lfileslist.append(lfutil.splitstandin(f))
480 repo._lfilestoupdate = lfileslist
473 repo._lfilestoupdate = lfileslist
481 return True
474 return True
482 else:
475 else:
483 return False
476 return False
484 return orig_matchfn(f)
477 return orig_matchfn(f)
485 m.matchfn = matchfn
478 m.matchfn = matchfn
486 return m
479 return m
487 oldmatch = installmatchfn(override_match)
480 oldmatch = installmatchfn(override_match)
488 scmutil.match
481 scmutil.match
489 matches = override_match(repo[None], pats, opts)
482 matches = override_match(repo[None], pats, opts)
490 orig(ui, repo, *pats, **opts)
483 orig(ui, repo, *pats, **opts)
491 finally:
484 finally:
492 restorematchfn()
485 restorematchfn()
493 lfileslist = getattr(repo, '_lfilestoupdate', [])
486 lfileslist = getattr(repo, '_lfilestoupdate', [])
494 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
487 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
495 printmessage=False)
488 printmessage=False)
496 # Empty out the lfiles list so we start fresh next time
489 # Empty out the lfiles list so we start fresh next time
497 repo._lfilestoupdate = []
490 repo._lfilestoupdate = []
498 for lfile in modified:
491 for lfile in modified:
499 if lfile in lfileslist:
492 if lfile in lfileslist:
500 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
493 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
501 in repo['.']:
494 in repo['.']:
502 lfutil.writestandin(repo, lfutil.standin(lfile),
495 lfutil.writestandin(repo, lfutil.standin(lfile),
503 repo['.'][lfile].data().strip(),
496 repo['.'][lfile].data().strip(),
504 'x' in repo['.'][lfile].flags())
497 'x' in repo['.'][lfile].flags())
505 lfdirstate = lfutil.openlfdirstate(ui, repo)
498 lfdirstate = lfutil.openlfdirstate(ui, repo)
506 for lfile in added:
499 for lfile in added:
507 standin = lfutil.standin(lfile)
500 standin = lfutil.standin(lfile)
508 if standin not in ctx and (standin in matches or opts.get('all')):
501 if standin not in ctx and (standin in matches or opts.get('all')):
509 if lfile in lfdirstate:
502 if lfile in lfdirstate:
510 lfdirstate.drop(lfile)
503 lfdirstate.drop(lfile)
511 util.unlinkpath(repo.wjoin(standin))
504 util.unlinkpath(repo.wjoin(standin))
512 lfdirstate.write()
505 lfdirstate.write()
513 finally:
506 finally:
514 wlock.release()
507 wlock.release()
515
508
def hg_update(orig, repo, node):
    '''Wrap hg.update: after updating the standins, refresh the
    largefiles in the working directory to match them.'''
    # XXX check if it worked first
    res = orig(repo, node)
    lfcommands.updatelfiles(repo.ui, repo)
    return res
521
514
def hg_clean(orig, repo, node, show_stats=True):
    '''Wrap hg.clean: after a clean checkout, bring the largefiles in
    the working directory in sync with their standins.'''
    res = orig(repo, node, show_stats)
    lfcommands.updatelfiles(repo.ui, repo)
    return res
526
519
def hg_merge(orig, repo, node, force=None, remind=True):
    '''Wrap hg.merge: once the merge of the standins is done, update the
    largefiles in the working directory accordingly.'''
    res = orig(repo, node, force, remind)
    lfcommands.updatelfiles(repo.ui, repo)
    return res
531
524
# When we rebase a repository with remotely changed lfiles, we need
# to take some extra care so that the lfiles are correctly updated
# in the working copy
def override_pull(orig, ui, repo, source=None, **opts):
    '''Wrap commands.pull: when --rebase is given, drive the pull and
    rebase ourselves so that largefiles end up correct afterwards.'''
    if not opts.get('rebase', False):
        # Plain pull: just remember where largefiles should be pulled from.
        repo.lfpullsource = source
        return orig(ui, repo, source or 'default', **opts)

    repo._isrebasing = True
    try:
        if opts.get('update'):
            del opts['update']
            ui.debug('--update and --rebase are not compatible, ignoring '
                     'the update flag\n')
        del opts['rebase']
        cmdutil.bailifchanged(repo)
        nbefore = len(repo)
        savedpostincoming = commands.postincoming
        def _dummy(*args, **kwargs):
            pass
        # Suppress the normal post-pull working-copy update; the rebase
        # below takes care of the working directory.
        commands.postincoming = _dummy
        repo.lfpullsource = source
        if not source:
            source = 'default'
        try:
            result = commands.pull(ui, repo, source, **opts)
        finally:
            commands.postincoming = savedpostincoming
        # Only rebase if the pull actually brought in new changesets.
        if len(repo) > nbefore:
            result = result or rebase.rebase(ui, repo)
        return result
    finally:
        repo._isrebasing = False
568
561
def override_rebase(orig, ui, repo, **opts):
    '''Flag the repo as rebasing while the wrapped rebase runs, so other
    largefiles hooks can tell a rebase is in progress.'''
    repo._isrebasing = True
    try:
        orig(ui, repo, **opts)
    finally:
        repo._isrebasing = False
575
568
def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=None):
    '''Wrap archival.archive so that largefiles are written to the
    archive with their real contents (taken from the largefiles cache)
    rather than their standins.

    Mirrors the archival.archive interface; ``orig`` is unused because
    the whole archiving loop has to be reimplemented here.
    '''
    # No need to lock because we are only reading history and lfile
    # caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Honor the caller's match function and decode filters before
        # handing the data to the archiver.
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0o644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo, getdata().strip())
            f = lfutil.splitstandin(f)

            # Bind 'path' as a default argument so the closure cannot
            # pick up a later iteration's value, and open the file
            # *outside* the try block: previously a failed open() made
            # the finally clause raise NameError on the unbound 'fd',
            # masking the real error.
            def getdatafn(path=path):
                fd = open(path, 'rb')
                try:
                    return fd.read()
                finally:
                    fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    if subrepos:
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)
            try:
                # Newer subrepo.archive signatures take a ui argument;
                # fall back to the older two-argument form.
                sub.archive(repo.ui, archiver, prefix)
            except TypeError:
                sub.archive(archiver, prefix)

    archiver.done()
651
644
# If a lfile is modified the change is not reflected in its standin until a
# commit. cmdutil.bailifchanged raises an exception if the repo has
# uncommitted changes. Wrap it to also check if lfiles were changed. This is
# used by bisect and backout.
def override_bailifchanged(orig, repo):
    orig(repo)
    # Run status with largefiles enabled so modified lfiles are seen too.
    repo.lfstatus = True
    s = repo.status()
    repo.lfstatus = False
    # s[:4] is (modified, added, removed, deleted)
    if any(s[:4]):
        raise util.Abort(_('outstanding uncommitted changes'))
663
656
# Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
def override_fetch(orig, ui, repo, *pats, **opts):
    # Run status with largefiles enabled so modified lfiles are seen too.
    repo.lfstatus = True
    s = repo.status()
    repo.lfstatus = False
    # s[:4] is (modified, added, removed, deleted)
    if any(s[:4]):
        raise util.Abort(_('outstanding uncommitted changes'))
    return orig(ui, repo, *pats, **opts)
672
665
def override_forget(orig, ui, repo, *pats, **opts):
    '''Wrap commands.forget so that largefiles matched by the patterns
    are also dropped from tracking (standins removed, lfdirstate
    updated).'''
    # Let the original forget handle the normal files first.
    installnormalfilesmatchfn(repo[None].manifest())
    orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    # Candidates: modified + added + deleted + clean, restricted to
    # files that actually have a standin in the working manifest.
    manifest = repo[None].manifest()
    forget = [f for f in sorted(s[0] + s[1] + s[3] + s[6])
              if lfutil.standin(f) in manifest]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race inbetween.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
                           unlink=True)
    finally:
        wlock.release()
712
705
def getoutgoinglfiles(ui, repo, dest=None, **opts):
    '''Return the set of standins for largefiles touched by outgoing
    changesets towards dest, or None when the remote repo cannot be
    reached or nothing is outgoing.'''
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    remoteui = hg.remoteui

    try:
        remote = hg.repository(remoteui(repo, opts), dest)
    except error.RepoError:
        return None
    o = lfutil.findoutgoing(repo, remote, False)
    if not o:
        return None
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()

    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # For merges, ctx.files() is incomplete: also collect files
            # removed relative to either parent, and files whose
            # manifest entry differs from a parent's.
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for parentman in (mp1, mp2):
                for f in parentman:
                    if f not in mc:
                        files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        toupload.update(f for f in files
                        if lfutil.isstandin(f) and f in ctx)
    return toupload
754
747
def override_outgoing(orig, ui, repo, dest=None, **opts):
    '''Wrap commands.outgoing: with --large, also list the largefiles
    that would be uploaded.'''
    orig(ui, repo, dest, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for standin in toupload:
                ui.status(lfutil.splitstandin(standin) + '\n')
            ui.status('\n')
767
760
def override_summary(orig, ui, repo, *pats, **opts):
    '''Wrap commands.summary: with --large, also report how many
    largefiles are waiting to be uploaded.'''
    orig(ui, repo, *pats, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, None, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles: %d to upload\n') % len(toupload))
777
770
def override_addremove(orig, ui, repo, *pats, **opts):
    '''Refuse to run addremove in a repository that tracks largefiles.'''
    # Check if the parent or child has lfiles; if they do, don't allow
    # addremove. If there is a symlink in the manifest then getting the
    # manifest throws an exception: catch it and let addremove deal with
    # it. This happens in Mercurial's test test-addremove-symlink.
    def safemanifest(ctx):
        # Manifests are only iterable, so turn them into sets.
        try:
            return set(ctx.manifest())
        except util.Abort:
            return set()

    for fname in safemanifest(repo['tip']) | safemanifest(repo[None]):
        if fname.startswith(lfutil.shortname):
            raise util.Abort(
                _('addremove cannot be run on a repo with largefiles'))

    return orig(ui, repo, *pats, **opts)
799
792
# Calling purge with --all will cause the lfiles to be deleted.
# Override repo.status to prevent this from happening.
def override_purge(orig, ui, repo, *dirs, **opts):
    oldstatus = repo.status
    def override_status(node1='.', node2=None, match=None, ignored=False,
                        clean=False, unknown=False, listsubrepos=False):
        (modified, added, removed, deleted,
         unknown, ignored, clean) = oldstatus(node1, node2, match, ignored,
                                              clean, unknown, listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # Files tracked in the lfdirstate are largefiles, not unknown or
        # ignored files, so purge must leave them alone.
        unknown = [f for f in unknown if lfdirstate[f] == '?']
        ignored = [f for f in ignored if lfdirstate[f] == '?']
        return modified, added, removed, deleted, unknown, ignored, clean
    repo.status = override_status
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
816
809
def override_rollback(orig, ui, repo, **opts):
    '''Wrap commands.rollback: after rolling back, restore the standins
    to match the new parent and resync the largefiles dirstate.'''
    result = orig(ui, repo, **opts)
    # Put only the standin files back in sync with the dirstate parent.
    merge.update(repo, node=None, branchmerge=False, force=True,
                 partial=lfutil.isstandin)
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    lfiles = lfutil.listlfiles(repo)
    oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
    for lfile in lfiles:
        if lfile in oldlfiles:
            # Already tracked before the rolled-back transaction.
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.add(lfile)
    lfdirstate.write()
    return result
General Comments 0
You need to be logged in to leave comments. Login now