largefiles: use "normallookup" on "lfdirstate" while reverting...
FUJIWARA Katsunori
r21934:0cb34b39 stable
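The hunks below (apparently lfcommands.py, followed by the start of overrides.py) add a normallookup keyword to lfcommands.updatelfiles() and consult it when a standin is already recorded as 'n' (normal) in the largefiles dirstate, forcing the entry back into the "look it up again" state. As a rough sketch of how a caller could use the new flag after files have been reverted -- the wrapper name and call site here are hypothetical; only the updatelfiles() signature comes from this changeset:

    import lfcommands

    def _afterrevert(ui, repo, reverted):
        # Hypothetical caller (e.g. the revert wrapper in overrides.py).
        # normallookup=True pushes the reverted largefiles' dirstate entries
        # into the 'normallookup' state, so the next status run re-hashes
        # them instead of trusting a stale 'n' (clean) record.
        lfcommands.updatelfiles(ui, repo, filelist=reverted,
                                printmessage=False, normallookup=True)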
@@ -1,577 +1,578 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 '''High-level command function for lfconvert, plus the cmdtable.'''
 
 import os, errno
 import shutil
 
 from mercurial import util, match as match_, hg, node, context, error, \
     cmdutil, scmutil, commands
 from mercurial.i18n import _
 from mercurial.lock import release
 
 import lfutil
 import basestore
 
 # -- Commands ----------------------------------------------------------
 
 cmdtable = {}
 command = cmdutil.command(cmdtable)
 
 @command('lfconvert',
     [('s', 'size', '',
       _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
     ('', 'to-normal', False,
      _('convert from a largefiles repo to a normal repo')),
     ],
     _('hg lfconvert SOURCE DEST [FILE ...]'),
     norepo=True,
     inferrepo=True)
 def lfconvert(ui, src, dest, *pats, **opts):
     '''convert a normal repository to a largefiles repository
 
     Convert repository SOURCE to a new repository DEST, identical to
     SOURCE except that certain files will be converted as largefiles:
     specifically, any file that matches any PATTERN *or* whose size is
     above the minimum size threshold is converted as a largefile. The
     size used to determine whether or not to track a file as a
     largefile is the size of the first version of the file. The
     minimum size can be specified either with --size or in
     configuration as ``largefiles.size``.
 
     After running this command you will need to make sure that
     largefiles is enabled anywhere you intend to push the new
     repository.
 
     Use --to-normal to convert largefiles back to normal files; after
     this, the DEST repository can be used without largefiles at all.'''
 
     if opts['to_normal']:
         tolfile = False
     else:
         tolfile = True
         size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
 
     if not hg.islocal(src):
         raise util.Abort(_('%s is not a local Mercurial repo') % src)
     if not hg.islocal(dest):
         raise util.Abort(_('%s is not a local Mercurial repo') % dest)
 
     rsrc = hg.repository(ui, src)
     ui.status(_('initializing destination %s\n') % dest)
     rdst = hg.repository(ui, dest, create=True)
 
     success = False
     dstwlock = dstlock = None
     try:
         # Lock destination to prevent modification while it is converted to.
         # Don't need to lock src because we are just reading from its history
         # which can't change.
         dstwlock = rdst.wlock()
         dstlock = rdst.lock()
 
         # Get a list of all changesets in the source. The easy way to do this
         # is to simply walk the changelog, using changelog.nodesbetween().
         # Take a look at mercurial/revlog.py:639 for more details.
         # Use a generator instead of a list to decrease memory usage
         ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
             rsrc.heads())[0])
         revmap = {node.nullid: node.nullid}
         if tolfile:
             lfiles = set()
             normalfiles = set()
             if not pats:
                 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
             if pats:
                 matcher = match_.match(rsrc.root, '', list(pats))
             else:
                 matcher = None
 
             lfiletohash = {}
             for ctx in ctxs:
                 ui.progress(_('converting revisions'), ctx.rev(),
                     unit=_('revision'), total=rsrc['tip'].rev())
                 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                     lfiles, normalfiles, matcher, size, lfiletohash)
             ui.progress(_('converting revisions'), None)
 
             if os.path.exists(rdst.wjoin(lfutil.shortname)):
                 shutil.rmtree(rdst.wjoin(lfutil.shortname))
 
             for f in lfiletohash.keys():
                 if os.path.isfile(rdst.wjoin(f)):
                     os.unlink(rdst.wjoin(f))
                 try:
                     os.removedirs(os.path.dirname(rdst.wjoin(f)))
                 except OSError:
                     pass
 
             # If there were any files converted to largefiles, add largefiles
             # to the destination repository's requirements.
             if lfiles:
                 rdst.requirements.add('largefiles')
                 rdst._writerequirements()
         else:
             for ctx in ctxs:
                 ui.progress(_('converting revisions'), ctx.rev(),
                     unit=_('revision'), total=rsrc['tip'].rev())
                 _addchangeset(ui, rsrc, rdst, ctx, revmap)
 
             ui.progress(_('converting revisions'), None)
         success = True
     finally:
         rdst.dirstate.clear()
         release(dstlock, dstwlock)
         if not success:
             # we failed, remove the new directory
             shutil.rmtree(rdst.root)
 
 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
     # Convert src parents to dst parents
     parents = _convertparents(ctx, revmap)
 
     # Generate list of changed files
     files = _getchangedfiles(ctx, parents)
 
     def getfilectx(repo, memctx, f):
         if lfutil.standin(f) in files:
             # if the file isn't in the manifest then it was removed
             # or renamed, raise IOError to indicate this
             try:
                 fctx = ctx.filectx(lfutil.standin(f))
             except error.LookupError:
                 raise IOError
             renamed = fctx.renamed()
             if renamed:
                 renamed = lfutil.splitstandin(renamed[0])
 
             hash = fctx.data().strip()
             path = lfutil.findfile(rsrc, hash)
 
             # If one file is missing, likely all files from this rev are
             if path is None:
                 cachelfiles(ui, rsrc, ctx.node())
                 path = lfutil.findfile(rsrc, hash)
 
             if path is None:
                 raise util.Abort(
                     _("missing largefile \'%s\' from revision %s")
                     % (f, node.hex(ctx.node())))
 
             data = ''
             fd = None
             try:
                 fd = open(path, 'rb')
                 data = fd.read()
             finally:
                 if fd:
                     fd.close()
             return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
                                       'x' in fctx.flags(), renamed)
         else:
             return _getnormalcontext(repo, ctx, f, revmap)
 
     dstfiles = []
     for file in files:
         if lfutil.isstandin(file):
             dstfiles.append(lfutil.splitstandin(file))
         else:
             dstfiles.append(file)
     # Commit
     _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
 
 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
         matcher, size, lfiletohash):
     # Convert src parents to dst parents
     parents = _convertparents(ctx, revmap)
 
     # Generate list of changed files
     files = _getchangedfiles(ctx, parents)
 
     dstfiles = []
     for f in files:
         if f not in lfiles and f not in normalfiles:
             islfile = _islfile(f, ctx, matcher, size)
             # If this file was renamed or copied then copy
             # the largefile-ness of its predecessor
             if f in ctx.manifest():
                 fctx = ctx.filectx(f)
                 renamed = fctx.renamed()
                 renamedlfile = renamed and renamed[0] in lfiles
                 islfile |= renamedlfile
                 if 'l' in fctx.flags():
                     if renamedlfile:
                         raise util.Abort(
                             _('renamed/copied largefile %s becomes symlink')
                             % f)
                     islfile = False
             if islfile:
                 lfiles.add(f)
             else:
                 normalfiles.add(f)
 
         if f in lfiles:
             dstfiles.append(lfutil.standin(f))
             # largefile in manifest if it has not been removed/renamed
             if f in ctx.manifest():
                 fctx = ctx.filectx(f)
                 if 'l' in fctx.flags():
                     renamed = fctx.renamed()
                     if renamed and renamed[0] in lfiles:
                         raise util.Abort(_('largefile %s becomes symlink') % f)
 
                 # largefile was modified, update standins
                 m = util.sha1('')
                 m.update(ctx[f].data())
                 hash = m.hexdigest()
                 if f not in lfiletohash or lfiletohash[f] != hash:
                     rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                     executable = 'x' in ctx[f].flags()
                     lfutil.writestandin(rdst, lfutil.standin(f), hash,
                         executable)
                     lfiletohash[f] = hash
         else:
             # normal file
             dstfiles.append(f)
 
     def getfilectx(repo, memctx, f):
         if lfutil.isstandin(f):
             # if the file isn't in the manifest then it was removed
             # or renamed, raise IOError to indicate this
             srcfname = lfutil.splitstandin(f)
             try:
                 fctx = ctx.filectx(srcfname)
             except error.LookupError:
                 raise IOError
             renamed = fctx.renamed()
             if renamed:
                 # standin is always a largefile because largefile-ness
                 # doesn't change after rename or copy
                 renamed = lfutil.standin(renamed[0])
 
             return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                                       'l' in fctx.flags(), 'x' in fctx.flags(),
                                       renamed)
         else:
             return _getnormalcontext(repo, ctx, f, revmap)
 
     # Commit
     _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
 
 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
     mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                           getfilectx, ctx.user(), ctx.date(), ctx.extra())
     ret = rdst.commitctx(mctx)
     rdst.setparents(ret)
     revmap[ctx.node()] = rdst.changelog.tip()
 
 # Generate list of changed files
 def _getchangedfiles(ctx, parents):
     files = set(ctx.files())
     if node.nullid not in parents:
         mc = ctx.manifest()
         mp1 = ctx.parents()[0].manifest()
         mp2 = ctx.parents()[1].manifest()
         files |= (set(mp1) | set(mp2)) - set(mc)
         for f in mc:
             if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                 files.add(f)
     return files
 
 # Convert src parents to dst parents
 def _convertparents(ctx, revmap):
     parents = []
     for p in ctx.parents():
         parents.append(revmap[p.node()])
     while len(parents) < 2:
         parents.append(node.nullid)
     return parents
 
 # Get memfilectx for a normal file
 def _getnormalcontext(repo, ctx, f, revmap):
     try:
         fctx = ctx.filectx(f)
     except error.LookupError:
         raise IOError
     renamed = fctx.renamed()
     if renamed:
         renamed = renamed[0]
 
     data = fctx.data()
     if f == '.hgtags':
         data = _converttags (repo.ui, revmap, data)
     return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
                               'x' in fctx.flags(), renamed)
 
 # Remap tag data using a revision map
 def _converttags(ui, revmap, data):
     newdata = []
     for line in data.splitlines():
         try:
             id, name = line.split(' ', 1)
         except ValueError:
             ui.warn(_('skipping incorrectly formatted tag %s\n')
                 % line)
             continue
         try:
             newid = node.bin(id)
         except TypeError:
             ui.warn(_('skipping incorrectly formatted id %s\n')
                 % id)
             continue
         try:
             newdata.append('%s %s\n' % (node.hex(revmap[newid]),
                 name))
         except KeyError:
             ui.warn(_('no mapping for id %s\n') % id)
             continue
     return ''.join(newdata)
 
 def _islfile(file, ctx, matcher, size):
     '''Return true if file should be considered a largefile, i.e.
     matcher matches it or it is larger than size.'''
     # never store special .hg* files as largefiles
     if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
         return False
     if matcher and matcher(file):
         return True
     try:
         return ctx.filectx(file).size() >= size * 1024 * 1024
     except error.LookupError:
         return False
 
 def uploadlfiles(ui, rsrc, rdst, files):
     '''upload largefiles to the central store'''
 
     if not files:
         return
 
     store = basestore._openstore(rsrc, rdst, put=True)
 
     at = 0
     ui.debug("sending statlfile command for %d largefiles\n" % len(files))
     retval = store.exists(files)
     files = filter(lambda h: not retval[h], files)
     ui.debug("%d largefiles need to be uploaded\n" % len(files))
 
     for hash in files:
         ui.progress(_('uploading largefiles'), at, unit='largefile',
                     total=len(files))
         source = lfutil.findfile(rsrc, hash)
         if not source:
             raise util.Abort(_('largefile %s missing from store'
                                ' (needs to be uploaded)') % hash)
         # XXX check for errors here
         store.put(source, hash)
         at += 1
     ui.progress(_('uploading largefiles'), None)
 
 def verifylfiles(ui, repo, all=False, contents=False):
     '''Verify that every largefile revision in the current changeset
     exists in the central store. With --contents, also verify that
     the contents of each local largefile file revision are correct (SHA-1 hash
     matches the revision ID). With --all, check every changeset in
     this repository.'''
     if all:
         # Pass a list to the function rather than an iterator because we know a
         # list will work.
         revs = range(len(repo))
     else:
         revs = ['.']
 
     store = basestore._openstore(repo)
     return store.verify(revs, contents=contents)
 
 def cachelfiles(ui, repo, node, filelist=None):
     '''cachelfiles ensures that all largefiles needed by the specified revision
     are present in the repository's largefile cache.
 
     returns a tuple (cached, missing). cached is the list of files downloaded
     by this operation; missing is the list of files that were needed but could
     not be found.'''
     lfiles = lfutil.listlfiles(repo, node)
     if filelist:
         lfiles = set(lfiles) & set(filelist)
     toget = []
 
     for lfile in lfiles:
         try:
             expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
         except IOError, err:
             if err.errno == errno.ENOENT:
                 continue # node must be None and standin wasn't found in wctx
             raise
         if not lfutil.findfile(repo, expectedhash):
             toget.append((lfile, expectedhash))
 
     if toget:
         store = basestore._openstore(repo)
         ret = store.get(toget)
         return ret
 
     return ([], [])
 
 def downloadlfiles(ui, repo, rev=None):
     matchfn = scmutil.match(repo[None],
                             [repo.wjoin(lfutil.shortname)], {})
     def prepare(ctx, fns):
         pass
     totalsuccess = 0
     totalmissing = 0
     if rev != []: # walkchangerevs on empty list would return all revs
         for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
                                           prepare):
             success, missing = cachelfiles(ui, repo, ctx.node())
             totalsuccess += len(success)
             totalmissing += len(missing)
     ui.status(_("%d additional largefiles cached\n") % totalsuccess)
     if totalmissing > 0:
         ui.status(_("%d largefiles failed to download\n") % totalmissing)
     return totalsuccess, totalmissing
 
-def updatelfiles(ui, repo, filelist=None, printmessage=True):
+def updatelfiles(ui, repo, filelist=None, printmessage=True,
+                 normallookup=False):
     wlock = repo.wlock()
     try:
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
 
         if filelist is not None:
             lfiles = [f for f in lfiles if f in filelist]
 
         update = {}
         updated, removed = 0, 0
         for lfile in lfiles:
             abslfile = repo.wjoin(lfile)
             absstandin = repo.wjoin(lfutil.standin(lfile))
             if os.path.exists(absstandin):
                 if (os.path.exists(absstandin + '.orig') and
                     os.path.exists(abslfile)):
                     shutil.copyfile(abslfile, abslfile + '.orig')
                     util.unlinkpath(absstandin + '.orig')
                 expecthash = lfutil.readstandin(repo, lfile)
                 if (expecthash != '' and
                     (not os.path.exists(abslfile) or
                      expecthash != lfutil.hashfile(abslfile))):
                     if lfile not in repo[None]: # not switched to normal file
                         util.unlinkpath(abslfile, ignoremissing=True)
                     # use normallookup() to allocate entry in largefiles
                     # dirstate, because lack of it misleads
                     # lfilesrepo.status() into recognition that such cache
                     # missing files are REMOVED.
                     lfdirstate.normallookup(lfile)
                     update[lfile] = expecthash
             else:
                 # Remove lfiles for which the standin is deleted, unless the
                 # lfile is added to the repository again. This happens when a
                 # largefile is converted back to a normal file: the standin
                 # disappears, but a new (normal) file appears as the lfile.
                 if (os.path.exists(abslfile) and
                     repo.dirstate.normalize(lfile) not in repo[None]):
                     util.unlinkpath(abslfile)
                     removed += 1
 
         # largefile processing might be slow and be interrupted - be prepared
         lfdirstate.write()
 
         if lfiles:
             if printmessage:
                 ui.status(_('getting changed largefiles\n'))
             cachelfiles(ui, repo, None, lfiles)
 
         for lfile in lfiles:
             update1 = 0
 
             expecthash = update.get(lfile)
             if expecthash:
                 if not lfutil.copyfromcache(repo, expecthash, lfile):
                     # failed ... but already removed and set to normallookup
                     continue
                 # Synchronize largefile dirstate to the last modified
                 # time of the file
                 lfdirstate.normal(lfile)
                 update1 = 1
 
             # copy the state of largefile standin from the repository's
             # dirstate to its state in the lfdirstate.
             abslfile = repo.wjoin(lfile)
             absstandin = repo.wjoin(lfutil.standin(lfile))
             if os.path.exists(absstandin):
                 mode = os.stat(absstandin).st_mode
                 if mode != os.stat(abslfile).st_mode:
                     os.chmod(abslfile, mode)
                     update1 = 1
 
             updated += update1
 
             standin = lfutil.standin(lfile)
             if standin in repo.dirstate:
                 stat = repo.dirstate._map[standin]
                 state, mtime = stat[0], stat[3]
             else:
                 state, mtime = '?', -1
             if state == 'n':
-                if mtime < 0:
+                if normallookup or mtime < 0:
                     # state 'n' doesn't ensure 'clean' in this case
                     lfdirstate.normallookup(lfile)
                 else:
                     lfdirstate.normal(lfile)
             elif state == 'm':
                 lfdirstate.normallookup(lfile)
             elif state == 'r':
                 lfdirstate.remove(lfile)
             elif state == 'a':
                 lfdirstate.add(lfile)
             elif state == '?':
                 lfdirstate.drop(lfile)
 
         lfdirstate.write()
         if printmessage and lfiles:
             ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                                                                   removed))
     finally:
         wlock.release()
 
 @command('lfpull',
     [('r', 'rev', [], _('pull largefiles for these revisions'))
     ] + commands.remoteopts,
     _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
 def lfpull(ui, repo, source="default", **opts):
     """pull largefiles for the specified revisions from the specified source
 
     Pull largefiles that are referenced from local changesets but missing
     locally, pulling from a remote repository to the local cache.
 
     If SOURCE is omitted, the 'default' path will be used.
     See :hg:`help urls` for more information.
 
     .. container:: verbose
 
       Some examples:
 
       - pull largefiles for all branch heads::
 
           hg lfpull -r "head() and not closed()"
 
       - pull largefiles on the default branch::
 
           hg lfpull -r "branch(default)"
     """
     repo.lfpullsource = source
 
     revs = opts.get('rev', [])
     if not revs:
         raise util.Abort(_('no revisions specified'))
     revs = scmutil.revrange(repo, revs)
 
     numcached = 0
     for rev in revs:
         ui.note(_('pulling largefiles for revision %s\n') % rev)
         (cached, missing) = cachelfiles(ui, repo, rev)
         numcached += len(cached)
     ui.status(_("%d largefiles cached\n") % numcached)
@@ -1,1222 +1,1228 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 archival, merge, pathutil, revset
15 archival, merge, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 import lfutil
20 import lfutil
21 import lfcommands
21 import lfcommands
22 import basestore
22 import basestore
23
23
24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
25
25
26 def installnormalfilesmatchfn(manifest):
26 def installnormalfilesmatchfn(manifest):
27 '''installmatchfn with a matchfn that ignores all largefiles'''
27 '''installmatchfn with a matchfn that ignores all largefiles'''
28 def overridematch(ctx, pats=[], opts={}, globbed=False,
28 def overridematch(ctx, pats=[], opts={}, globbed=False,
29 default='relpath'):
29 default='relpath'):
30 match = oldmatch(ctx, pats, opts, globbed, default)
30 match = oldmatch(ctx, pats, opts, globbed, default)
31 m = copy.copy(match)
31 m = copy.copy(match)
32 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
32 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
33 manifest)
33 manifest)
34 m._files = filter(notlfile, m._files)
34 m._files = filter(notlfile, m._files)
35 m._fmap = set(m._files)
35 m._fmap = set(m._files)
36 m._always = False
36 m._always = False
37 origmatchfn = m.matchfn
37 origmatchfn = m.matchfn
38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
39 return m
39 return m
40 oldmatch = installmatchfn(overridematch)
40 oldmatch = installmatchfn(overridematch)
41
41
42 def installmatchfn(f):
42 def installmatchfn(f):
43 '''monkey patch the scmutil module with a custom match function.
43 '''monkey patch the scmutil module with a custom match function.
44 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
44 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
45 oldmatch = scmutil.match
45 oldmatch = scmutil.match
46 setattr(f, 'oldmatch', oldmatch)
46 setattr(f, 'oldmatch', oldmatch)
47 scmutil.match = f
47 scmutil.match = f
48 return oldmatch
48 return oldmatch
49
49
50 def restorematchfn():
50 def restorematchfn():
51 '''restores scmutil.match to what it was before installmatchfn
51 '''restores scmutil.match to what it was before installmatchfn
52 was called. no-op if scmutil.match is its original function.
52 was called. no-op if scmutil.match is its original function.
53
53
54 Note that n calls to installmatchfn will require n calls to
54 Note that n calls to installmatchfn will require n calls to
55 restore matchfn to reverse'''
55 restore matchfn to reverse'''
56 scmutil.match = getattr(scmutil.match, 'oldmatch')
56 scmutil.match = getattr(scmutil.match, 'oldmatch')
57
57
58 def installmatchandpatsfn(f):
58 def installmatchandpatsfn(f):
59 oldmatchandpats = scmutil.matchandpats
59 oldmatchandpats = scmutil.matchandpats
60 setattr(f, 'oldmatchandpats', oldmatchandpats)
60 setattr(f, 'oldmatchandpats', oldmatchandpats)
61 scmutil.matchandpats = f
61 scmutil.matchandpats = f
62 return oldmatchandpats
62 return oldmatchandpats
63
63
64 def restorematchandpatsfn():
64 def restorematchandpatsfn():
65 '''restores scmutil.matchandpats to what it was before
65 '''restores scmutil.matchandpats to what it was before
66 installnormalfilesmatchandpatsfn was called. no-op if scmutil.matchandpats
66 installnormalfilesmatchandpatsfn was called. no-op if scmutil.matchandpats
67 is its original function.
67 is its original function.
68
68
69 Note that n calls to installnormalfilesmatchandpatsfn will require n calls
69 Note that n calls to installnormalfilesmatchandpatsfn will require n calls
70 to restore matchfn to reverse'''
70 to restore matchfn to reverse'''
71 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
71 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
72 scmutil.matchandpats)
72 scmutil.matchandpats)
73
73
74 def addlargefiles(ui, repo, *pats, **opts):
74 def addlargefiles(ui, repo, *pats, **opts):
75 large = opts.pop('large', None)
75 large = opts.pop('large', None)
76 lfsize = lfutil.getminsize(
76 lfsize = lfutil.getminsize(
77 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
77 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
78
78
79 lfmatcher = None
79 lfmatcher = None
80 if lfutil.islfilesrepo(repo):
80 if lfutil.islfilesrepo(repo):
81 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
81 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
82 if lfpats:
82 if lfpats:
83 lfmatcher = match_.match(repo.root, '', list(lfpats))
83 lfmatcher = match_.match(repo.root, '', list(lfpats))
84
84
85 lfnames = []
85 lfnames = []
86 m = scmutil.match(repo[None], pats, opts)
86 m = scmutil.match(repo[None], pats, opts)
87 m.bad = lambda x, y: None
87 m.bad = lambda x, y: None
88 wctx = repo[None]
88 wctx = repo[None]
89 for f in repo.walk(m):
89 for f in repo.walk(m):
90 exact = m.exact(f)
90 exact = m.exact(f)
91 lfile = lfutil.standin(f) in wctx
91 lfile = lfutil.standin(f) in wctx
92 nfile = f in wctx
92 nfile = f in wctx
93 exists = lfile or nfile
93 exists = lfile or nfile
94
94
95 # Don't warn the user when they attempt to add a normal tracked file.
95 # Don't warn the user when they attempt to add a normal tracked file.
96 # The normal add code will do that for us.
96 # The normal add code will do that for us.
97 if exact and exists:
97 if exact and exists:
98 if lfile:
98 if lfile:
99 ui.warn(_('%s already a largefile\n') % f)
99 ui.warn(_('%s already a largefile\n') % f)
100 continue
100 continue
101
101
102 if (exact or not exists) and not lfutil.isstandin(f):
102 if (exact or not exists) and not lfutil.isstandin(f):
103 wfile = repo.wjoin(f)
103 wfile = repo.wjoin(f)
104
104
105 # In case the file was removed previously, but not committed
105 # In case the file was removed previously, but not committed
106 # (issue3507)
106 # (issue3507)
107 if not os.path.exists(wfile):
107 if not os.path.exists(wfile):
108 continue
108 continue
109
109
110 abovemin = (lfsize and
110 abovemin = (lfsize and
111 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
111 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
112 if large or abovemin or (lfmatcher and lfmatcher(f)):
112 if large or abovemin or (lfmatcher and lfmatcher(f)):
113 lfnames.append(f)
113 lfnames.append(f)
114 if ui.verbose or not exact:
114 if ui.verbose or not exact:
115 ui.status(_('adding %s as a largefile\n') % m.rel(f))
115 ui.status(_('adding %s as a largefile\n') % m.rel(f))
116
116
117 bad = []
117 bad = []
118 standins = []
118 standins = []
119
119
120 # Need to lock, otherwise there could be a race condition between
120 # Need to lock, otherwise there could be a race condition between
121 # when standins are created and added to the repo.
121 # when standins are created and added to the repo.
122 wlock = repo.wlock()
122 wlock = repo.wlock()
123 try:
123 try:
124 if not opts.get('dry_run'):
124 if not opts.get('dry_run'):
125 lfdirstate = lfutil.openlfdirstate(ui, repo)
125 lfdirstate = lfutil.openlfdirstate(ui, repo)
126 for f in lfnames:
126 for f in lfnames:
127 standinname = lfutil.standin(f)
127 standinname = lfutil.standin(f)
128 lfutil.writestandin(repo, standinname, hash='',
128 lfutil.writestandin(repo, standinname, hash='',
129 executable=lfutil.getexecutable(repo.wjoin(f)))
129 executable=lfutil.getexecutable(repo.wjoin(f)))
130 standins.append(standinname)
130 standins.append(standinname)
131 if lfdirstate[f] == 'r':
131 if lfdirstate[f] == 'r':
132 lfdirstate.normallookup(f)
132 lfdirstate.normallookup(f)
133 else:
133 else:
134 lfdirstate.add(f)
134 lfdirstate.add(f)
135 lfdirstate.write()
135 lfdirstate.write()
136 bad += [lfutil.splitstandin(f)
136 bad += [lfutil.splitstandin(f)
137 for f in repo[None].add(standins)
137 for f in repo[None].add(standins)
138 if f in m.files()]
138 if f in m.files()]
139 finally:
139 finally:
140 wlock.release()
140 wlock.release()
141 return bad
141 return bad
142
142
143 def removelargefiles(ui, repo, *pats, **opts):
143 def removelargefiles(ui, repo, *pats, **opts):
144 after = opts.get('after')
144 after = opts.get('after')
145 if not pats and not after:
145 if not pats and not after:
146 raise util.Abort(_('no files specified'))
146 raise util.Abort(_('no files specified'))
147 m = scmutil.match(repo[None], pats, opts)
147 m = scmutil.match(repo[None], pats, opts)
148 try:
148 try:
149 repo.lfstatus = True
149 repo.lfstatus = True
150 s = repo.status(match=m, clean=True)
150 s = repo.status(match=m, clean=True)
151 finally:
151 finally:
152 repo.lfstatus = False
152 repo.lfstatus = False
153 manifest = repo[None].manifest()
153 manifest = repo[None].manifest()
154 modified, added, deleted, clean = [[f for f in list
154 modified, added, deleted, clean = [[f for f in list
155 if lfutil.standin(f) in manifest]
155 if lfutil.standin(f) in manifest]
156 for list in [s[0], s[1], s[3], s[6]]]
156 for list in [s[0], s[1], s[3], s[6]]]
157
157
158 def warn(files, msg):
158 def warn(files, msg):
159 for f in files:
159 for f in files:
160 ui.warn(msg % m.rel(f))
160 ui.warn(msg % m.rel(f))
161 return int(len(files) > 0)
161 return int(len(files) > 0)
162
162
163 result = 0
163 result = 0
164
164
165 if after:
165 if after:
166 remove, forget = deleted, []
166 remove, forget = deleted, []
167 result = warn(modified + added + clean,
167 result = warn(modified + added + clean,
168 _('not removing %s: file still exists\n'))
168 _('not removing %s: file still exists\n'))
169 else:
169 else:
170 remove, forget = deleted + clean, []
170 remove, forget = deleted + clean, []
171 result = warn(modified, _('not removing %s: file is modified (use -f'
171 result = warn(modified, _('not removing %s: file is modified (use -f'
172 ' to force removal)\n'))
172 ' to force removal)\n'))
173 result = warn(added, _('not removing %s: file has been marked for add'
173 result = warn(added, _('not removing %s: file has been marked for add'
174 ' (use forget to undo)\n')) or result
174 ' (use forget to undo)\n')) or result
175
175
176 for f in sorted(remove + forget):
176 for f in sorted(remove + forget):
177 if ui.verbose or not m.exact(f):
177 if ui.verbose or not m.exact(f):
178 ui.status(_('removing %s\n') % m.rel(f))
178 ui.status(_('removing %s\n') % m.rel(f))
179
179
180 # Need to lock because standin files are deleted then removed from the
180 # Need to lock because standin files are deleted then removed from the
181 # repository and we could race in-between.
181 # repository and we could race in-between.
182 wlock = repo.wlock()
182 wlock = repo.wlock()
183 try:
183 try:
184 lfdirstate = lfutil.openlfdirstate(ui, repo)
184 lfdirstate = lfutil.openlfdirstate(ui, repo)
185 for f in remove:
185 for f in remove:
186 if not after:
186 if not after:
187 # If this is being called by addremove, notify the user that we
187 # If this is being called by addremove, notify the user that we
188 # are removing the file.
188 # are removing the file.
189 if getattr(repo, "_isaddremove", False):
189 if getattr(repo, "_isaddremove", False):
190 ui.status(_('removing %s\n') % f)
190 ui.status(_('removing %s\n') % f)
191 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
191 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
192 lfdirstate.remove(f)
192 lfdirstate.remove(f)
193 lfdirstate.write()
193 lfdirstate.write()
194 forget = [lfutil.standin(f) for f in forget]
194 forget = [lfutil.standin(f) for f in forget]
195 remove = [lfutil.standin(f) for f in remove]
195 remove = [lfutil.standin(f) for f in remove]
196 repo[None].forget(forget)
196 repo[None].forget(forget)
197 # If this is being called by addremove, let the original addremove
197 # If this is being called by addremove, let the original addremove
198 # function handle this.
198 # function handle this.
199 if not getattr(repo, "_isaddremove", False):
199 if not getattr(repo, "_isaddremove", False):
200 for f in remove:
200 for f in remove:
201 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
201 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
202 repo[None].forget(remove)
202 repo[None].forget(remove)
203 finally:
203 finally:
204 wlock.release()
204 wlock.release()
205
205
206 return result
206 return result
207
207
208 # For overriding mercurial.hgweb.webcommands so that largefiles will
208 # For overriding mercurial.hgweb.webcommands so that largefiles will
209 # appear at their right place in the manifests.
209 # appear at their right place in the manifests.
210 def decodepath(orig, path):
210 def decodepath(orig, path):
211 return lfutil.splitstandin(path) or path
211 return lfutil.splitstandin(path) or path
212
212
213 # -- Wrappers: modify existing commands --------------------------------
213 # -- Wrappers: modify existing commands --------------------------------
214
214
215 # Add works by going through the files that the user wanted to add and
215 # Add works by going through the files that the user wanted to add and
216 # checking if they should be added as largefiles. Then it makes a new
216 # checking if they should be added as largefiles. Then it makes a new
217 # matcher which matches only the normal files and runs the original
217 # matcher which matches only the normal files and runs the original
218 # version of add.
218 # version of add.
219 def overrideadd(orig, ui, repo, *pats, **opts):
219 def overrideadd(orig, ui, repo, *pats, **opts):
220 normal = opts.pop('normal')
220 normal = opts.pop('normal')
221 if normal:
221 if normal:
222 if opts.get('large'):
222 if opts.get('large'):
223 raise util.Abort(_('--normal cannot be used with --large'))
223 raise util.Abort(_('--normal cannot be used with --large'))
224 return orig(ui, repo, *pats, **opts)
224 return orig(ui, repo, *pats, **opts)
225 bad = addlargefiles(ui, repo, *pats, **opts)
225 bad = addlargefiles(ui, repo, *pats, **opts)
226 installnormalfilesmatchfn(repo[None].manifest())
226 installnormalfilesmatchfn(repo[None].manifest())
227 result = orig(ui, repo, *pats, **opts)
227 result = orig(ui, repo, *pats, **opts)
228 restorematchfn()
228 restorematchfn()
229
229
230 return (result == 1 or bad) and 1 or 0
230 return (result == 1 or bad) and 1 or 0
231
231
232 def overrideremove(orig, ui, repo, *pats, **opts):
232 def overrideremove(orig, ui, repo, *pats, **opts):
233 installnormalfilesmatchfn(repo[None].manifest())
233 installnormalfilesmatchfn(repo[None].manifest())
234 result = orig(ui, repo, *pats, **opts)
234 result = orig(ui, repo, *pats, **opts)
235 restorematchfn()
235 restorematchfn()
236 return removelargefiles(ui, repo, *pats, **opts) or result
236 return removelargefiles(ui, repo, *pats, **opts) or result
237
237
238 def overridestatusfn(orig, repo, rev2, **opts):
238 def overridestatusfn(orig, repo, rev2, **opts):
239 try:
239 try:
240 repo._repo.lfstatus = True
240 repo._repo.lfstatus = True
241 return orig(repo, rev2, **opts)
241 return orig(repo, rev2, **opts)
242 finally:
242 finally:
243 repo._repo.lfstatus = False
243 repo._repo.lfstatus = False
244
244
245 def overridestatus(orig, ui, repo, *pats, **opts):
245 def overridestatus(orig, ui, repo, *pats, **opts):
246 try:
246 try:
247 repo.lfstatus = True
247 repo.lfstatus = True
248 return orig(ui, repo, *pats, **opts)
248 return orig(ui, repo, *pats, **opts)
249 finally:
249 finally:
250 repo.lfstatus = False
250 repo.lfstatus = False
251
251
252 def overridedirty(orig, repo, ignoreupdate=False):
252 def overridedirty(orig, repo, ignoreupdate=False):
253 try:
253 try:
254 repo._repo.lfstatus = True
254 repo._repo.lfstatus = True
255 return orig(repo, ignoreupdate)
255 return orig(repo, ignoreupdate)
256 finally:
256 finally:
257 repo._repo.lfstatus = False
257 repo._repo.lfstatus = False
258
258
def overridelog(orig, ui, repo, *pats, **opts):
    def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
        m, p = copy.copy(matchandpats)

        pats = set(p)
        # TODO: handling of patterns in both cases below
        if m._cwd:
            if os.path.isabs(m._cwd):
                # TODO: handle largefile magic when invoked from other cwd
                return matchandpats
            back = (m._cwd.count('/') + 1) * '../'
            pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
        else:
            pats.update(lfutil.standin(f) for f in p)

        for i in range(0, len(m._files)):
            standin = lfutil.standin(m._files[i])
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()]:
                m._files.append(standin)
                pats.add(standin)

        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        return m, pats

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
        restorematchandpatsfn()

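# Illustrative sketch (not part of the original overrides.py): the matcher
# above relies on the standin naming scheme, where every largefile
# 'foo/bar.bin' is shadowed by a small tracked file '.hglf/foo/bar.bin'
# containing its hash. Simplified equivalents of lfutil.standin() and
# lfutil.splitstandin() behave roughly like this:

def _standin(filename, shortname='.hglf'):
    # map a largefile name to its standin path
    return shortname + '/' + filename

def _splitstandin(filename, shortname='.hglf'):
    # map a standin path back to the largefile name, or None
    if filename.startswith(shortname + '/'):
        return filename[len(shortname) + 1:]
    return None

assert _standin('foo/bar.bin') == '.hglf/foo/bar.bin'
assert _splitstandin('.hglf/foo/bar.bin') == 'foo/bar.bin'
assert _splitstandin('foo/bar.bin') is None
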
def overrideverify(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result

def overridedebugstate(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    if large:
        class fakerepo(object):
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)

# Override needs to refresh standins so that update's normal merge
# will go through properly. Then the other update hook (overriding repo.update)
# will get the new files. Filemerge is also overridden so that the merge
# will merge standins correctly.
def overrideupdate(orig, ui, repo, *pats, **opts):
    # Need to lock between the standins getting updated and their
    # largefiles getting updated
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
                              [], False, False, False)
        (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

        if opts['check']:
            mod = len(modified) > 0
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted changes'))
        # XXX handle removed differently
        if not opts['clean']:
            for lfile in unsure + modified + added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))
        return orig(ui, repo, *pats, **opts)
    finally:
        wlock.release()

# Before starting the manifest merge, merge.updates will call
# _checkunknown to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden manifestmerge function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f)

# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original manifestmerge and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             partial, acceptremote, followcopies):
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
                     acceptremote, followcopies)

    if overwrite:
        return actions

    removes = set(a[0] for a in actions['r'])

    newglist = []
    for action in actions['g']:
        f, args, msg = action
        splitstandin = f and lfutil.splitstandin(f)
        if (splitstandin is not None and
            splitstandin in p1 and splitstandin not in removes):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('remote turned local normal file %s into a largefile\n'
                    'use (l)argefile or keep (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                actions['r'].append((lfile, None, msg))
                newglist.append((standin, (p2.flags(standin),), msg))
            else:
                actions['r'].append((standin, None, msg))
        elif lfutil.standin(f) in p1 and lfutil.standin(f) not in removes:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('remote turned local largefile %s into a normal file\n'
                    'keep (l)argefile or use (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                actions['r'].append((lfile, None, msg))
            else:
                actions['r'].append((standin, None, msg))
                newglist.append((lfile, (p2.flags(lfile),), msg))
        else:
            newglist.append(action)

    newglist.sort()
    actions['g'] = newglist

    return actions

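# Illustrative sketch (not part of the original overrides.py): these
# override functions do nothing by themselves; the largefiles extension
# installs them from its uisetup, roughly along these lines (the exact call
# sites live in the extension's uisetup.py, which is not part of this file,
# so treat the snippet below as an assumption-laden sketch):

def _wireupmergeoverrides():
    from mercurial import extensions, merge
    # the wrapper receives the original function as its first argument
    extensions.wrapfunction(merge, 'calculateupdates',
                            overridecalculateupdates)
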
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)

    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
               (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0

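# Illustrative sketch (not part of the original overrides.py): a standin
# holds the hex SHA-1 of the largefile's contents, so the conflict test
# above reduces to comparing three short hash strings rather than doing a
# three-way file merge. The hashing itself works roughly like this
# (a simplified stand-in for lfutil.hashfile):

import hashlib

def _hashfile(path):
    # stream the file through SHA-1 and return the hex digest
    h = hashlib.sha1()
    with open(path, 'rb') as fp:
        for chunk in iter(lambda: fp.read(128 * 1024), b''):
            h.update(chunk)
    return h.hexdigest()
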
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile; in that function it
# checks whether the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        try:
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       (f in manifest) and
                                       origmatchfn(lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                        # The file is gone, but this deletes any empty parent
                        # directories as a side-effect.
                        util.unlinkpath(repo.wjoin(srclfile), True)
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))

                    lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result

# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
def overriderevert(orig, ui, repo, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
        lfdirstate.write()
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in missing:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(ctx, pats=[], opts={}, globbed=False,
                default='relpath'):
            match = oldmatch(ctx, pats, opts, globbed, default)
            m = copy.copy(match)
            def tostandin(f):
                if lfutil.standin(f) in ctx:
                    return lfutil.standin(f)
                elif lfutil.standin(f) in repo[None]:
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fmap = set(m._files)
            m._always = False
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in repo[None] or f in ctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
-        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False)
+        # lfdirstate should be 'normallookup'-ed for updated files,
+        # because reverting doesn't touch dirstate for 'normal' files
+        # when target revision is explicitly specified: in such case,
+        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
+        # of target (standin) file.
+        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
+                                normallookup=True)

    finally:
        wlock.release()

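# Illustrative note (not part of the original overrides.py): the hunk above
# is the whole change of this revision. dirstate.normal(f) records the
# current size/mtime, so the entry is treated as clean until those change;
# dirstate.normallookup(f) records state 'n' without a valid timestamp,
# forcing the next status to re-examine the file's contents. After a revert
# to an explicitly specified revision, the standin on disk can keep its old
# size/mtime even though its content must be re-checked, so the wrapper now
# asks updatelfiles() to mark the reverted files with the pattern:
#
#     for lfile in filelist:          # files whose standins were reverted
#         lfdirstate.normallookup(lfile)
#     lfdirstate.write()
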
def hgupdaterepo(orig, repo, node, overwrite):
    if not overwrite:
        # Only call updatelfiles on the standins that have changed to save time
        oldstandins = lfutil.getstandinsstate(repo)

    result = orig(repo, node, overwrite)

    filelist = None
    if not overwrite:
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
    lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
    return result

def hgmerge(orig, repo, node, force=None, remind=True):
    result = orig(repo, node, force, remind)
    lfcommands.updatelfiles(repo.ui, repo)
    return result

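# Illustrative sketch (not part of the original overrides.py): the
# "snapshot, update, snapshot, diff" dance in hgupdaterepo() avoids
# rehydrating every largefile after an update: only names whose
# (file, standin hash) pair changed between the two snapshots are handed to
# updatelfiles(). The diff itself is essentially a symmetric difference
# over those pairs, roughly:

def _changedlfiles(oldstate, newstate):
    # each state is a list of (largefile name, standin hash) pairs
    changed = set(oldstate) ^ set(newstate)
    seen = []
    for name, _hash in changed:
        if name not in seen:
            seen.append(name)
    return seen

assert _changedlfiles([('a', '1'), ('b', '2')],
                      [('a', '1'), ('b', '3')]) == ['b']
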
# When we rebase a repository with remotely changed largefiles, we need to
# take some extra care so that the largefiles are correctly updated in the
# working copy
def overridepull(orig, ui, repo, source=None, **opts):
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result

def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that have just been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull --lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull --lfrev "head(pulled()) and not closed()"

    """

    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise util.Abort(_("pulled() only available in --lfrev"))
    return revset.baseset([r for r in subset if r >= firstpulled])

def overrideclone(orig, ui, source, dest=None, **opts):
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise util.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            d)

    return orig(ui, source, dest, **opts)

def hgclone(orig, ui, opts, *args, **kwargs):
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # Caching is implicitly limited to the 'rev' option, since the dest
        # repo was truncated at that point. The user may expect a download
        # count with this option, so attempt the download whether or not this
        # is a largefile repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            if missing != 0:
                return None

    return result

def overriderebase(orig, ui, repo, **opts):
    repo._isrebasing = True
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._isrebasing = False

def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix=None, mtime=None, subrepos=None):
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub.archive(repo.ui, archiver, prefix, submatch)

    archiver.done()

def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    lfcommands.cachelfiles(ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo._repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(os.path.join(prefix, path), 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        submatch = match_.narrowmatcher(subpath, match)
        sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
                    submatch)

# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def overridebailifchanged(orig, repo):
    orig(repo)
    repo.lfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('uncommitted changes'))

# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
def overridefetch(orig, ui, repo, *pats, **opts):
    repo.lfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('uncommitted changes'))
    return orig(ui, repo, *pats, **opts)

def overrideforget(orig, ui, repo, *pats, **opts):
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            result = 1

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(standins)
    finally:
        wlock.release()

    return result

def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on the 'other' repository are ignored.

    'addfunc' is invoked with each unique pair of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()
    def dedup(fn, lfhash):
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        lfexists = basestore._openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]: # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)

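# Illustrative sketch (not part of the original overrides.py): the callback
# protocol above decouples "walk the outgoing revisions" from "what to do
# with each (filename, hash) pair"; the same addfunc can fill a set, a dict
# of hashes per file, or a counter. A self-contained example of the
# dedup-then-callback shape:

def _collectunique(pairs, addfunc):
    # invoke addfunc once per distinct (filename, hash) pair
    seen = set()
    for fn, lfhash in pairs:
        if (fn, lfhash) not in seen:
            seen.add((fn, lfhash))
            addfunc(fn, lfhash)

collected = []
_collectunique([('big.bin', 'aaa'), ('big.bin', 'aaa'), ('big.bin', 'bbb')],
               lambda fn, h: collected.append((fn, h)))
assert collected == [('big.bin', 'aaa'), ('big.bin', 'bbb')]
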
def outgoinghook(ui, repo, other, opts, missing):
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug('    %s\n' % (lfhash))
        else:
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')

def summaryremotehook(ui, repo, opts, changes):
    largeopt = opts.get('large', False)
    if changes is None:
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))

def overridesummary(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
                     similarity=None):
    if not lfutil.islfilesrepo(repo):
        return orig(repo, pats, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
                          False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Call into the normal remove code, but we want the removal of the standin
    # itself to be handled by the original addremove. Monkey patching here
    # makes sure we don't remove the standin in the largefiles code, preventing
    # a very confused state later.
    if missing:
        m = [repo.wjoin(f) for f in missing]
        repo._isaddremove = True
        removelargefiles(repo.ui, repo, *m, **opts)
        repo._isaddremove = False
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    addlargefiles(repo.ui, repo, *pats, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by installing a matcher that will ignore them.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(repo, pats, opts, dry_run, similarity)
    restorematchfn()
    return result

1116 # Calling purge with --all will cause the largefiles to be deleted.
1122 # Calling purge with --all will cause the largefiles to be deleted.
1117 # Override repo.status to prevent this from happening.
1123 # Override repo.status to prevent this from happening.
1118 def overridepurge(orig, ui, repo, *dirs, **opts):
1124 def overridepurge(orig, ui, repo, *dirs, **opts):
1119 # XXX large file status is buggy when used on repo proxy.
1125 # XXX large file status is buggy when used on repo proxy.
1120 # XXX this needs to be investigated.
1126 # XXX this needs to be investigated.
1121 repo = repo.unfiltered()
1127 repo = repo.unfiltered()
1122 oldstatus = repo.status
1128 oldstatus = repo.status
1123 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1129 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1124 clean=False, unknown=False, listsubrepos=False):
1130 clean=False, unknown=False, listsubrepos=False):
1125 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1131 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1126 listsubrepos)
1132 listsubrepos)
1127 lfdirstate = lfutil.openlfdirstate(ui, repo)
1133 lfdirstate = lfutil.openlfdirstate(ui, repo)
1128 modified, added, removed, deleted, unknown, ignored, clean = r
1134 modified, added, removed, deleted, unknown, ignored, clean = r
1129 unknown = [f for f in unknown if lfdirstate[f] == '?']
1135 unknown = [f for f in unknown if lfdirstate[f] == '?']
1130 ignored = [f for f in ignored if lfdirstate[f] == '?']
1136 ignored = [f for f in ignored if lfdirstate[f] == '?']
1131 return modified, added, removed, deleted, unknown, ignored, clean
1137 return modified, added, removed, deleted, unknown, ignored, clean
1132 repo.status = overridestatus
1138 repo.status = overridestatus
1133 orig(ui, repo, *dirs, **opts)
1139 orig(ui, repo, *dirs, **opts)
1134 repo.status = oldstatus
1140 repo.status = oldstatus
1135
1141
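
overridepurge temporarily replaces repo.status with a filtered version and restores the saved one afterwards. A generic sketch of that temporary-override idiom follows; the names are illustrative, not Mercurial API, and unlike the code above it restores the original in a finally block so it also survives an exception.

    from contextlib import contextmanager

    @contextmanager
    def patched(obj, name, replacement):
        # temporarily replace obj.<name>, restoring it even on error
        saved = getattr(obj, name)
        setattr(obj, name, replacement)
        try:
            yield
        finally:
            setattr(obj, name, saved)
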
1136 def overriderollback(orig, ui, repo, **opts):
1142 def overriderollback(orig, ui, repo, **opts):
1137 result = orig(ui, repo, **opts)
1143 result = orig(ui, repo, **opts)
1138 merge.update(repo, node=None, branchmerge=False, force=True,
1144 merge.update(repo, node=None, branchmerge=False, force=True,
1139 partial=lfutil.isstandin)
1145 partial=lfutil.isstandin)
1140 wlock = repo.wlock()
1146 wlock = repo.wlock()
1141 try:
1147 try:
1142 lfdirstate = lfutil.openlfdirstate(ui, repo)
1148 lfdirstate = lfutil.openlfdirstate(ui, repo)
1143 lfiles = lfutil.listlfiles(repo)
1149 lfiles = lfutil.listlfiles(repo)
1144 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1150 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1145 for file in lfiles:
1151 for file in lfiles:
1146 if file in oldlfiles:
1152 if file in oldlfiles:
1147 lfdirstate.normallookup(file)
1153 lfdirstate.normallookup(file)
1148 else:
1154 else:
1149 lfdirstate.add(file)
1155 lfdirstate.add(file)
1150 lfdirstate.write()
1156 lfdirstate.write()
1151 finally:
1157 finally:
1152 wlock.release()
1158 wlock.release()
1153 return result
1159 return result
1154
1160
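
overriderollback resynchronizes the largefiles dirstate after the underlying rollback: entries that already existed in the new working parent get normallookup(), which forces the next status to re-examine them rather than trust possibly stale size/mtime data, while genuinely new entries get add(). A minimal sketch of that rule, assuming only an object exposing those two methods:

    def sync_lfdirstate(lfdirstate, current_files, previous_files):
        for f in current_files:
            if f in previous_files:
                lfdirstate.normallookup(f)   # existing file: re-check later
            else:
                lfdirstate.add(f)            # new file: schedule for adding
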
1155 def overridetransplant(orig, ui, repo, *revs, **opts):
1161 def overridetransplant(orig, ui, repo, *revs, **opts):
1156 try:
1162 try:
1157 oldstandins = lfutil.getstandinsstate(repo)
1163 oldstandins = lfutil.getstandinsstate(repo)
1158 repo._istransplanting = True
1164 repo._istransplanting = True
1159 result = orig(ui, repo, *revs, **opts)
1165 result = orig(ui, repo, *revs, **opts)
1160 newstandins = lfutil.getstandinsstate(repo)
1166 newstandins = lfutil.getstandinsstate(repo)
1161 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1167 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1162 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1168 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1163 printmessage=True)
1169 printmessage=True)
1164 finally:
1170 finally:
1165 repo._istransplanting = False
1171 repo._istransplanting = False
1166 return result
1172 return result
1167
1173
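
overridetransplant snapshots the standins before and after the transplant and asks lfutil.getlfilestoupdate which largefiles need refreshing. A hedged illustration of that comparison, treating each snapshot as a list of (path, hash) pairs; this is not the real lfutil implementation.

    def changed_standins(old, new):
        # paths whose hash differs, or that exist in only one snapshot
        oldmap, newmap = dict(old), dict(new)
        return sorted(p for p in set(oldmap) | set(newmap)
                      if oldmap.get(p) != newmap.get(p))

    # changed_standins([('a', '111')], [('a', '222'), ('b', '333')])
    # -> ['a', 'b']
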
1168 def overridecat(orig, ui, repo, file1, *pats, **opts):
1174 def overridecat(orig, ui, repo, file1, *pats, **opts):
1169 ctx = scmutil.revsingle(repo, opts.get('rev'))
1175 ctx = scmutil.revsingle(repo, opts.get('rev'))
1170 err = 1
1176 err = 1
1171 notbad = set()
1177 notbad = set()
1172 m = scmutil.match(ctx, (file1,) + pats, opts)
1178 m = scmutil.match(ctx, (file1,) + pats, opts)
1173 origmatchfn = m.matchfn
1179 origmatchfn = m.matchfn
1174 def lfmatchfn(f):
1180 def lfmatchfn(f):
1175 if origmatchfn(f):
1181 if origmatchfn(f):
1176 return True
1182 return True
1177 lf = lfutil.splitstandin(f)
1183 lf = lfutil.splitstandin(f)
1178 if lf is None:
1184 if lf is None:
1179 return False
1185 return False
1180 notbad.add(lf)
1186 notbad.add(lf)
1181 return origmatchfn(lf)
1187 return origmatchfn(lf)
1182 m.matchfn = lfmatchfn
1188 m.matchfn = lfmatchfn
1183 origbadfn = m.bad
1189 origbadfn = m.bad
1184 def lfbadfn(f, msg):
1190 def lfbadfn(f, msg):
1185 if f not in notbad:
1191 if f not in notbad:
1186 origbadfn(f, msg)
1192 origbadfn(f, msg)
1187 m.bad = lfbadfn
1193 m.bad = lfbadfn
1188 for f in ctx.walk(m):
1194 for f in ctx.walk(m):
1189 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1195 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1190 pathname=f)
1196 pathname=f)
1191 lf = lfutil.splitstandin(f)
1197 lf = lfutil.splitstandin(f)
1192 if lf is None or origmatchfn(f):
1198 if lf is None or origmatchfn(f):
1193 # duplicating unreachable code from commands.cat
1199 # duplicating unreachable code from commands.cat
1194 data = ctx[f].data()
1200 data = ctx[f].data()
1195 if opts.get('decode'):
1201 if opts.get('decode'):
1196 data = repo.wwritedata(f, data)
1202 data = repo.wwritedata(f, data)
1197 fp.write(data)
1203 fp.write(data)
1198 else:
1204 else:
1199 hash = lfutil.readstandin(repo, lf, ctx.rev())
1205 hash = lfutil.readstandin(repo, lf, ctx.rev())
1200 if not lfutil.inusercache(repo.ui, hash):
1206 if not lfutil.inusercache(repo.ui, hash):
1201 store = basestore._openstore(repo)
1207 store = basestore._openstore(repo)
1202 success, missing = store.get([(lf, hash)])
1208 success, missing = store.get([(lf, hash)])
1203 if len(success) != 1:
1209 if len(success) != 1:
1204 raise util.Abort(
1210 raise util.Abort(
1205 _('largefile %s is not in cache and could not be '
1211 _('largefile %s is not in cache and could not be '
1206 'downloaded') % lf)
1212 'downloaded') % lf)
1207 path = lfutil.usercachepath(repo.ui, hash)
1213 path = lfutil.usercachepath(repo.ui, hash)
1208 fpin = open(path, "rb")
1214 fpin = open(path, "rb")
1209 for chunk in util.filechunkiter(fpin, 128 * 1024):
1215 for chunk in util.filechunkiter(fpin, 128 * 1024):
1210 fp.write(chunk)
1216 fp.write(chunk)
1211 fpin.close()
1217 fpin.close()
1212 fp.close()
1218 fp.close()
1213 err = 0
1219 err = 0
1214 return err
1220 return err
1215
1221
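
When the requested largefile has to come from the store, overridecat streams it from the user cache in 128 KiB chunks rather than reading it whole. A small stand-alone sketch of that chunked copy, using plain file objects instead of Mercurial's util.filechunkiter:

    def copy_in_chunks(src_path, dst, blocksize=128 * 1024):
        # stream src_path into the writable file object dst, block by block
        with open(src_path, 'rb') as src:
            while True:
                chunk = src.read(blocksize)
                if not chunk:
                    break
                dst.write(chunk)
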
1216 def mercurialsinkbefore(orig, sink):
1222 def mercurialsinkbefore(orig, sink):
1217 sink.repo._isconverting = True
1223 sink.repo._isconverting = True
1218 orig(sink)
1224 orig(sink)
1219
1225
1220 def mercurialsinkafter(orig, sink):
1226 def mercurialsinkafter(orig, sink):
1221 sink.repo._isconverting = False
1227 sink.repo._isconverting = False
1222 orig(sink)
1228 orig(sink)
@@ -1,82 +1,102 b''
1 This file focuses mainly on updating largefiles in the working
1 This file focuses mainly on updating largefiles in the working
2 directory (and ".hg/largefiles/dirstate")
2 directory (and ".hg/largefiles/dirstate")
3
3
4 $ cat >> $HGRCPATH <<EOF
4 $ cat >> $HGRCPATH <<EOF
5 > [ui]
5 > [ui]
6 > merge = internal:fail
6 > merge = internal:fail
7 > [extensions]
7 > [extensions]
8 > largefiles =
8 > largefiles =
9 > EOF
9 > EOF
10
10
11 $ hg init repo
11 $ hg init repo
12 $ cd repo
12 $ cd repo
13
13
14 $ echo large1 > large1
14 $ echo large1 > large1
15 $ echo large2 > large2
15 $ echo large2 > large2
16 $ hg add --large large1 large2
16 $ hg add --large large1 large2
17 $ echo normal1 > normal1
17 $ echo normal1 > normal1
18 $ hg add normal1
18 $ hg add normal1
19 $ hg commit -m '#0'
19 $ hg commit -m '#0'
20 $ echo 'large1 in #1' > large1
20 $ echo 'large1 in #1' > large1
21 $ echo 'normal1 in #1' > normal1
21 $ echo 'normal1 in #1' > normal1
22 $ hg commit -m '#1'
22 $ hg commit -m '#1'
23 $ hg update -q -C 0
23 $ hg update -q -C 0
24 $ echo 'large2 in #2' > large2
24 $ echo 'large2 in #2' > large2
25 $ hg commit -m '#2'
25 $ hg commit -m '#2'
26 created new head
26 created new head
27
27
28 Test that "hg merge" updates largefiles from "other" correctly
28 Test that "hg merge" updates largefiles from "other" correctly
29
29
30 (getting largefiles from "other" normally)
30 (getting largefiles from "other" normally)
31
31
32 $ hg status -A large1
32 $ hg status -A large1
33 C large1
33 C large1
34 $ cat large1
34 $ cat large1
35 large1
35 large1
36 $ cat .hglf/large1
36 $ cat .hglf/large1
37 4669e532d5b2c093a78eca010077e708a071bb64
37 4669e532d5b2c093a78eca010077e708a071bb64
38 $ hg merge --config debug.dirstate.delaywrite=2
38 $ hg merge --config debug.dirstate.delaywrite=2
39 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
39 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 (branch merge, don't forget to commit)
40 (branch merge, don't forget to commit)
41 getting changed largefiles
41 getting changed largefiles
42 1 largefiles updated, 0 removed
42 1 largefiles updated, 0 removed
43 $ hg status -A large1
43 $ hg status -A large1
44 M large1
44 M large1
45 $ cat large1
45 $ cat large1
46 large1 in #1
46 large1 in #1
47 $ cat .hglf/large1
47 $ cat .hglf/large1
48 58e24f733a964da346e2407a2bee99d9001184f5
48 58e24f733a964da346e2407a2bee99d9001184f5
49 $ hg diff -c 1 --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
49 $ hg diff -c 1 --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
50 -4669e532d5b2c093a78eca010077e708a071bb64
50 -4669e532d5b2c093a78eca010077e708a071bb64
51 +58e24f733a964da346e2407a2bee99d9001184f5
51 +58e24f733a964da346e2407a2bee99d9001184f5
52
52
53 (getting largefiles from "other" via conflict prompt)
53 (getting largefiles from "other" via conflict prompt)
54
54
55 $ hg update -q -C 2
55 $ hg update -q -C 2
56 $ echo 'large1 in #3' > large1
56 $ echo 'large1 in #3' > large1
57 $ echo 'normal1 in #3' > normal1
57 $ echo 'normal1 in #3' > normal1
58 $ hg commit -m '#3'
58 $ hg commit -m '#3'
59 $ cat .hglf/large1
59 $ cat .hglf/large1
60 e5bb990443d6a92aaf7223813720f7566c9dd05b
60 e5bb990443d6a92aaf7223813720f7566c9dd05b
61 $ hg merge --config debug.dirstate.delaywrite=2 --config ui.interactive=True <<EOF
61 $ hg merge --config debug.dirstate.delaywrite=2 --config ui.interactive=True <<EOF
62 > o
62 > o
63 > EOF
63 > EOF
64 largefile large1 has a merge conflict
64 largefile large1 has a merge conflict
65 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
65 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
66 keep (l)ocal e5bb990443d6a92aaf7223813720f7566c9dd05b or
66 keep (l)ocal e5bb990443d6a92aaf7223813720f7566c9dd05b or
67 take (o)ther 58e24f733a964da346e2407a2bee99d9001184f5? merging normal1
67 take (o)ther 58e24f733a964da346e2407a2bee99d9001184f5? merging normal1
68 warning: conflicts during merge.
68 warning: conflicts during merge.
69 merging normal1 incomplete! (edit conflicts, then use 'hg resolve --mark')
69 merging normal1 incomplete! (edit conflicts, then use 'hg resolve --mark')
70 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
70 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
71 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
71 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
72 getting changed largefiles
72 getting changed largefiles
73 1 largefiles updated, 0 removed
73 1 largefiles updated, 0 removed
74 [1]
74 [1]
75 $ hg status -A large1
75 $ hg status -A large1
76 M large1
76 M large1
77 $ cat large1
77 $ cat large1
78 large1 in #1
78 large1 in #1
79 $ cat .hglf/large1
79 $ cat .hglf/large1
80 58e24f733a964da346e2407a2bee99d9001184f5
80 58e24f733a964da346e2407a2bee99d9001184f5
81
81
82 Test that "hg revert -r REV" updates largefiles from "REV" correctly
83
84 $ hg update -q -C 3
85 $ hg status -A large1
86 C large1
87 $ cat large1
88 large1 in #3
89 $ cat .hglf/large1
90 e5bb990443d6a92aaf7223813720f7566c9dd05b
91 $ hg diff -c 1 --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
92 -4669e532d5b2c093a78eca010077e708a071bb64
93 +58e24f733a964da346e2407a2bee99d9001184f5
94 $ hg revert --no-backup -r 1 --config debug.dirstate.delaywrite=2 large1
95 $ hg status -A large1
96 M large1
97 $ cat large1
98 large1 in #1
99 $ cat .hglf/large1
100 58e24f733a964da346e2407a2bee99d9001184f5
101
82 $ cd ..
102 $ cd ..
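
The revert test added above runs with debug.dirstate.delaywrite, presumably to exercise the timing-sensitive path deterministically. The ambiguity that "normallookup" guards against can be sketched roughly as follows (a conceptual illustration, not Mercurial code): a size/mtime comparison can only prove a file clean if its mtime is strictly older than the time the dirstate was written; otherwise the entry must be re-checked by content on the next status.

    def can_trust_stat(file_mtime, dirstate_write_time):
        # only an mtime strictly older than the dirstate write proves cleanliness
        return file_mtime < dirstate_write_time
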