largefiles: get function to write status messages via "getstatuswriter()"...
FUJIWARA Katsunori
r23189:fb139f55 default
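This changeset routes the largefiles status messages in updatelfiles() through a writer function obtained from lfutil.getstatuswriter(ui, repo, printmessage) instead of guarding each ui.status() call with a printmessage boolean. The default for printmessage becomes None ("use the default writer"); any other value forcibly prints (true) or ignores (false) the messages. The helper itself is added in lfutil.py and is not part of the hunks shown here, so the sketch below is only an illustration of the behaviour implied by the callers in this diff; the None branch in particular is an assumption.

# Illustrative sketch only -- the real getstatuswriter() lives in lfutil.py
# and is not shown in this changeset.
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function used to write largefiles status messages.'''
    if forcibly is None:
        # assumption: fall back to the ui's normal status output by default
        return ui.status
    if forcibly:
        return ui.status                  # "print message forcibly"
    return lambda *msg, **opts: None      # false: ignore the message
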
@@ -1,573 +1,578 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import os, errno
11 import os, errno
12 import shutil
12 import shutil
13
13
14 from mercurial import util, match as match_, hg, node, context, error, \
14 from mercurial import util, match as match_, hg, node, context, error, \
15 cmdutil, scmutil, commands
15 cmdutil, scmutil, commands
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.lock import release
17 from mercurial.lock import release
18
18
19 import lfutil
19 import lfutil
20 import basestore
20 import basestore
21
21
22 # -- Commands ----------------------------------------------------------
22 # -- Commands ----------------------------------------------------------
23
23
24 cmdtable = {}
24 cmdtable = {}
25 command = cmdutil.command(cmdtable)
25 command = cmdutil.command(cmdtable)
26
26
27 @command('lfconvert',
27 @command('lfconvert',
28 [('s', 'size', '',
28 [('s', 'size', '',
29 _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
29 _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
30 ('', 'to-normal', False,
30 ('', 'to-normal', False,
31 _('convert from a largefiles repo to a normal repo')),
31 _('convert from a largefiles repo to a normal repo')),
32 ],
32 ],
33 _('hg lfconvert SOURCE DEST [FILE ...]'),
33 _('hg lfconvert SOURCE DEST [FILE ...]'),
34 norepo=True,
34 norepo=True,
35 inferrepo=True)
35 inferrepo=True)
36 def lfconvert(ui, src, dest, *pats, **opts):
36 def lfconvert(ui, src, dest, *pats, **opts):
37 '''convert a normal repository to a largefiles repository
37 '''convert a normal repository to a largefiles repository
38
38
39 Convert repository SOURCE to a new repository DEST, identical to
39 Convert repository SOURCE to a new repository DEST, identical to
40 SOURCE except that certain files will be converted as largefiles:
40 SOURCE except that certain files will be converted as largefiles:
41 specifically, any file that matches any PATTERN *or* whose size is
41 specifically, any file that matches any PATTERN *or* whose size is
42 above the minimum size threshold is converted as a largefile. The
42 above the minimum size threshold is converted as a largefile. The
43 size used to determine whether or not to track a file as a
43 size used to determine whether or not to track a file as a
44 largefile is the size of the first version of the file. The
44 largefile is the size of the first version of the file. The
45 minimum size can be specified either with --size or in
45 minimum size can be specified either with --size or in
46 configuration as ``largefiles.size``.
46 configuration as ``largefiles.size``.
47
47
48 After running this command you will need to make sure that
48 After running this command you will need to make sure that
49 largefiles is enabled anywhere you intend to push the new
49 largefiles is enabled anywhere you intend to push the new
50 repository.
50 repository.
51
51
52 Use --to-normal to convert largefiles back to normal files; after
52 Use --to-normal to convert largefiles back to normal files; after
53 this, the DEST repository can be used without largefiles at all.'''
53 this, the DEST repository can be used without largefiles at all.'''
54
54
55 if opts['to_normal']:
55 if opts['to_normal']:
56 tolfile = False
56 tolfile = False
57 else:
57 else:
58 tolfile = True
58 tolfile = True
59 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
59 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
60
60
61 if not hg.islocal(src):
61 if not hg.islocal(src):
62 raise util.Abort(_('%s is not a local Mercurial repo') % src)
62 raise util.Abort(_('%s is not a local Mercurial repo') % src)
63 if not hg.islocal(dest):
63 if not hg.islocal(dest):
64 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
64 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
65
65
66 rsrc = hg.repository(ui, src)
66 rsrc = hg.repository(ui, src)
67 ui.status(_('initializing destination %s\n') % dest)
67 ui.status(_('initializing destination %s\n') % dest)
68 rdst = hg.repository(ui, dest, create=True)
68 rdst = hg.repository(ui, dest, create=True)
69
69
70 success = False
70 success = False
71 dstwlock = dstlock = None
71 dstwlock = dstlock = None
72 try:
72 try:
73 # Lock destination to prevent modification while it is converted to.
73 # Lock destination to prevent modification while it is converted to.
74 # Don't need to lock src because we are just reading from its history
74 # Don't need to lock src because we are just reading from its history
75 # which can't change.
75 # which can't change.
76 dstwlock = rdst.wlock()
76 dstwlock = rdst.wlock()
77 dstlock = rdst.lock()
77 dstlock = rdst.lock()
78
78
79 # Get a list of all changesets in the source. The easy way to do this
79 # Get a list of all changesets in the source. The easy way to do this
80 # is to simply walk the changelog, using changelog.nodesbetween().
80 # is to simply walk the changelog, using changelog.nodesbetween().
81 # Take a look at mercurial/revlog.py:639 for more details.
81 # Take a look at mercurial/revlog.py:639 for more details.
82 # Use a generator instead of a list to decrease memory usage
82 # Use a generator instead of a list to decrease memory usage
83 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
83 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
84 rsrc.heads())[0])
84 rsrc.heads())[0])
85 revmap = {node.nullid: node.nullid}
85 revmap = {node.nullid: node.nullid}
86 if tolfile:
86 if tolfile:
87 lfiles = set()
87 lfiles = set()
88 normalfiles = set()
88 normalfiles = set()
89 if not pats:
89 if not pats:
90 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
90 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
91 if pats:
91 if pats:
92 matcher = match_.match(rsrc.root, '', list(pats))
92 matcher = match_.match(rsrc.root, '', list(pats))
93 else:
93 else:
94 matcher = None
94 matcher = None
95
95
96 lfiletohash = {}
96 lfiletohash = {}
97 for ctx in ctxs:
97 for ctx in ctxs:
98 ui.progress(_('converting revisions'), ctx.rev(),
98 ui.progress(_('converting revisions'), ctx.rev(),
99 unit=_('revision'), total=rsrc['tip'].rev())
99 unit=_('revision'), total=rsrc['tip'].rev())
100 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
100 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
101 lfiles, normalfiles, matcher, size, lfiletohash)
101 lfiles, normalfiles, matcher, size, lfiletohash)
102 ui.progress(_('converting revisions'), None)
102 ui.progress(_('converting revisions'), None)
103
103
104 if os.path.exists(rdst.wjoin(lfutil.shortname)):
104 if os.path.exists(rdst.wjoin(lfutil.shortname)):
105 shutil.rmtree(rdst.wjoin(lfutil.shortname))
105 shutil.rmtree(rdst.wjoin(lfutil.shortname))
106
106
107 for f in lfiletohash.keys():
107 for f in lfiletohash.keys():
108 if os.path.isfile(rdst.wjoin(f)):
108 if os.path.isfile(rdst.wjoin(f)):
109 os.unlink(rdst.wjoin(f))
109 os.unlink(rdst.wjoin(f))
110 try:
110 try:
111 os.removedirs(os.path.dirname(rdst.wjoin(f)))
111 os.removedirs(os.path.dirname(rdst.wjoin(f)))
112 except OSError:
112 except OSError:
113 pass
113 pass
114
114
115 # If there were any files converted to largefiles, add largefiles
115 # If there were any files converted to largefiles, add largefiles
116 # to the destination repository's requirements.
116 # to the destination repository's requirements.
117 if lfiles:
117 if lfiles:
118 rdst.requirements.add('largefiles')
118 rdst.requirements.add('largefiles')
119 rdst._writerequirements()
119 rdst._writerequirements()
120 else:
120 else:
121 for ctx in ctxs:
121 for ctx in ctxs:
122 ui.progress(_('converting revisions'), ctx.rev(),
122 ui.progress(_('converting revisions'), ctx.rev(),
123 unit=_('revision'), total=rsrc['tip'].rev())
123 unit=_('revision'), total=rsrc['tip'].rev())
124 _addchangeset(ui, rsrc, rdst, ctx, revmap)
124 _addchangeset(ui, rsrc, rdst, ctx, revmap)
125
125
126 ui.progress(_('converting revisions'), None)
126 ui.progress(_('converting revisions'), None)
127 success = True
127 success = True
128 finally:
128 finally:
129 rdst.dirstate.clear()
129 rdst.dirstate.clear()
130 release(dstlock, dstwlock)
130 release(dstlock, dstwlock)
131 if not success:
131 if not success:
132 # we failed, remove the new directory
132 # we failed, remove the new directory
133 shutil.rmtree(rdst.root)
133 shutil.rmtree(rdst.root)
134
134
135 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
135 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
136 # Convert src parents to dst parents
136 # Convert src parents to dst parents
137 parents = _convertparents(ctx, revmap)
137 parents = _convertparents(ctx, revmap)
138
138
139 # Generate list of changed files
139 # Generate list of changed files
140 files = _getchangedfiles(ctx, parents)
140 files = _getchangedfiles(ctx, parents)
141
141
142 def getfilectx(repo, memctx, f):
142 def getfilectx(repo, memctx, f):
143 if lfutil.standin(f) in files:
143 if lfutil.standin(f) in files:
144 # if the file isn't in the manifest then it was removed
144 # if the file isn't in the manifest then it was removed
145 # or renamed, raise IOError to indicate this
145 # or renamed, raise IOError to indicate this
146 try:
146 try:
147 fctx = ctx.filectx(lfutil.standin(f))
147 fctx = ctx.filectx(lfutil.standin(f))
148 except error.LookupError:
148 except error.LookupError:
149 return None
149 return None
150 renamed = fctx.renamed()
150 renamed = fctx.renamed()
151 if renamed:
151 if renamed:
152 renamed = lfutil.splitstandin(renamed[0])
152 renamed = lfutil.splitstandin(renamed[0])
153
153
154 hash = fctx.data().strip()
154 hash = fctx.data().strip()
155 path = lfutil.findfile(rsrc, hash)
155 path = lfutil.findfile(rsrc, hash)
156
156
157 # If one file is missing, likely all files from this rev are
157 # If one file is missing, likely all files from this rev are
158 if path is None:
158 if path is None:
159 cachelfiles(ui, rsrc, ctx.node())
159 cachelfiles(ui, rsrc, ctx.node())
160 path = lfutil.findfile(rsrc, hash)
160 path = lfutil.findfile(rsrc, hash)
161
161
162 if path is None:
162 if path is None:
163 raise util.Abort(
163 raise util.Abort(
164 _("missing largefile \'%s\' from revision %s")
164 _("missing largefile \'%s\' from revision %s")
165 % (f, node.hex(ctx.node())))
165 % (f, node.hex(ctx.node())))
166
166
167 data = ''
167 data = ''
168 fd = None
168 fd = None
169 try:
169 try:
170 fd = open(path, 'rb')
170 fd = open(path, 'rb')
171 data = fd.read()
171 data = fd.read()
172 finally:
172 finally:
173 if fd:
173 if fd:
174 fd.close()
174 fd.close()
175 return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
175 return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
176 'x' in fctx.flags(), renamed)
176 'x' in fctx.flags(), renamed)
177 else:
177 else:
178 return _getnormalcontext(repo, ctx, f, revmap)
178 return _getnormalcontext(repo, ctx, f, revmap)
179
179
180 dstfiles = []
180 dstfiles = []
181 for file in files:
181 for file in files:
182 if lfutil.isstandin(file):
182 if lfutil.isstandin(file):
183 dstfiles.append(lfutil.splitstandin(file))
183 dstfiles.append(lfutil.splitstandin(file))
184 else:
184 else:
185 dstfiles.append(file)
185 dstfiles.append(file)
186 # Commit
186 # Commit
187 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
187 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
188
188
189 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
189 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
190 matcher, size, lfiletohash):
190 matcher, size, lfiletohash):
191 # Convert src parents to dst parents
191 # Convert src parents to dst parents
192 parents = _convertparents(ctx, revmap)
192 parents = _convertparents(ctx, revmap)
193
193
194 # Generate list of changed files
194 # Generate list of changed files
195 files = _getchangedfiles(ctx, parents)
195 files = _getchangedfiles(ctx, parents)
196
196
197 dstfiles = []
197 dstfiles = []
198 for f in files:
198 for f in files:
199 if f not in lfiles and f not in normalfiles:
199 if f not in lfiles and f not in normalfiles:
200 islfile = _islfile(f, ctx, matcher, size)
200 islfile = _islfile(f, ctx, matcher, size)
201 # If this file was renamed or copied then copy
201 # If this file was renamed or copied then copy
202 # the largefile-ness of its predecessor
202 # the largefile-ness of its predecessor
203 if f in ctx.manifest():
203 if f in ctx.manifest():
204 fctx = ctx.filectx(f)
204 fctx = ctx.filectx(f)
205 renamed = fctx.renamed()
205 renamed = fctx.renamed()
206 renamedlfile = renamed and renamed[0] in lfiles
206 renamedlfile = renamed and renamed[0] in lfiles
207 islfile |= renamedlfile
207 islfile |= renamedlfile
208 if 'l' in fctx.flags():
208 if 'l' in fctx.flags():
209 if renamedlfile:
209 if renamedlfile:
210 raise util.Abort(
210 raise util.Abort(
211 _('renamed/copied largefile %s becomes symlink')
211 _('renamed/copied largefile %s becomes symlink')
212 % f)
212 % f)
213 islfile = False
213 islfile = False
214 if islfile:
214 if islfile:
215 lfiles.add(f)
215 lfiles.add(f)
216 else:
216 else:
217 normalfiles.add(f)
217 normalfiles.add(f)
218
218
219 if f in lfiles:
219 if f in lfiles:
220 dstfiles.append(lfutil.standin(f))
220 dstfiles.append(lfutil.standin(f))
221 # largefile in manifest if it has not been removed/renamed
221 # largefile in manifest if it has not been removed/renamed
222 if f in ctx.manifest():
222 if f in ctx.manifest():
223 fctx = ctx.filectx(f)
223 fctx = ctx.filectx(f)
224 if 'l' in fctx.flags():
224 if 'l' in fctx.flags():
225 renamed = fctx.renamed()
225 renamed = fctx.renamed()
226 if renamed and renamed[0] in lfiles:
226 if renamed and renamed[0] in lfiles:
227 raise util.Abort(_('largefile %s becomes symlink') % f)
227 raise util.Abort(_('largefile %s becomes symlink') % f)
228
228
229 # largefile was modified, update standins
229 # largefile was modified, update standins
230 m = util.sha1('')
230 m = util.sha1('')
231 m.update(ctx[f].data())
231 m.update(ctx[f].data())
232 hash = m.hexdigest()
232 hash = m.hexdigest()
233 if f not in lfiletohash or lfiletohash[f] != hash:
233 if f not in lfiletohash or lfiletohash[f] != hash:
234 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
234 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
235 executable = 'x' in ctx[f].flags()
235 executable = 'x' in ctx[f].flags()
236 lfutil.writestandin(rdst, lfutil.standin(f), hash,
236 lfutil.writestandin(rdst, lfutil.standin(f), hash,
237 executable)
237 executable)
238 lfiletohash[f] = hash
238 lfiletohash[f] = hash
239 else:
239 else:
240 # normal file
240 # normal file
241 dstfiles.append(f)
241 dstfiles.append(f)
242
242
243 def getfilectx(repo, memctx, f):
243 def getfilectx(repo, memctx, f):
244 if lfutil.isstandin(f):
244 if lfutil.isstandin(f):
245 # if the file isn't in the manifest then it was removed
245 # if the file isn't in the manifest then it was removed
246 # or renamed, raise IOError to indicate this
246 # or renamed, raise IOError to indicate this
247 srcfname = lfutil.splitstandin(f)
247 srcfname = lfutil.splitstandin(f)
248 try:
248 try:
249 fctx = ctx.filectx(srcfname)
249 fctx = ctx.filectx(srcfname)
250 except error.LookupError:
250 except error.LookupError:
251 return None
251 return None
252 renamed = fctx.renamed()
252 renamed = fctx.renamed()
253 if renamed:
253 if renamed:
254 # standin is always a largefile because largefile-ness
254 # standin is always a largefile because largefile-ness
255 # doesn't change after rename or copy
255 # doesn't change after rename or copy
256 renamed = lfutil.standin(renamed[0])
256 renamed = lfutil.standin(renamed[0])
257
257
258 return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
258 return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
259 'l' in fctx.flags(), 'x' in fctx.flags(),
259 'l' in fctx.flags(), 'x' in fctx.flags(),
260 renamed)
260 renamed)
261 else:
261 else:
262 return _getnormalcontext(repo, ctx, f, revmap)
262 return _getnormalcontext(repo, ctx, f, revmap)
263
263
264 # Commit
264 # Commit
265 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
265 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
266
266
267 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
267 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
268 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
268 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
269 getfilectx, ctx.user(), ctx.date(), ctx.extra())
269 getfilectx, ctx.user(), ctx.date(), ctx.extra())
270 ret = rdst.commitctx(mctx)
270 ret = rdst.commitctx(mctx)
271 rdst.setparents(ret)
271 rdst.setparents(ret)
272 revmap[ctx.node()] = rdst.changelog.tip()
272 revmap[ctx.node()] = rdst.changelog.tip()
273
273
274 # Generate list of changed files
274 # Generate list of changed files
275 def _getchangedfiles(ctx, parents):
275 def _getchangedfiles(ctx, parents):
276 files = set(ctx.files())
276 files = set(ctx.files())
277 if node.nullid not in parents:
277 if node.nullid not in parents:
278 mc = ctx.manifest()
278 mc = ctx.manifest()
279 mp1 = ctx.parents()[0].manifest()
279 mp1 = ctx.parents()[0].manifest()
280 mp2 = ctx.parents()[1].manifest()
280 mp2 = ctx.parents()[1].manifest()
281 files |= (set(mp1) | set(mp2)) - set(mc)
281 files |= (set(mp1) | set(mp2)) - set(mc)
282 for f in mc:
282 for f in mc:
283 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
283 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
284 files.add(f)
284 files.add(f)
285 return files
285 return files
286
286
287 # Convert src parents to dst parents
287 # Convert src parents to dst parents
288 def _convertparents(ctx, revmap):
288 def _convertparents(ctx, revmap):
289 parents = []
289 parents = []
290 for p in ctx.parents():
290 for p in ctx.parents():
291 parents.append(revmap[p.node()])
291 parents.append(revmap[p.node()])
292 while len(parents) < 2:
292 while len(parents) < 2:
293 parents.append(node.nullid)
293 parents.append(node.nullid)
294 return parents
294 return parents
295
295
296 # Get memfilectx for a normal file
296 # Get memfilectx for a normal file
297 def _getnormalcontext(repo, ctx, f, revmap):
297 def _getnormalcontext(repo, ctx, f, revmap):
298 try:
298 try:
299 fctx = ctx.filectx(f)
299 fctx = ctx.filectx(f)
300 except error.LookupError:
300 except error.LookupError:
301 return None
301 return None
302 renamed = fctx.renamed()
302 renamed = fctx.renamed()
303 if renamed:
303 if renamed:
304 renamed = renamed[0]
304 renamed = renamed[0]
305
305
306 data = fctx.data()
306 data = fctx.data()
307 if f == '.hgtags':
307 if f == '.hgtags':
308 data = _converttags (repo.ui, revmap, data)
308 data = _converttags (repo.ui, revmap, data)
309 return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
309 return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
310 'x' in fctx.flags(), renamed)
310 'x' in fctx.flags(), renamed)
311
311
312 # Remap tag data using a revision map
312 # Remap tag data using a revision map
313 def _converttags(ui, revmap, data):
313 def _converttags(ui, revmap, data):
314 newdata = []
314 newdata = []
315 for line in data.splitlines():
315 for line in data.splitlines():
316 try:
316 try:
317 id, name = line.split(' ', 1)
317 id, name = line.split(' ', 1)
318 except ValueError:
318 except ValueError:
319 ui.warn(_('skipping incorrectly formatted tag %s\n')
319 ui.warn(_('skipping incorrectly formatted tag %s\n')
320 % line)
320 % line)
321 continue
321 continue
322 try:
322 try:
323 newid = node.bin(id)
323 newid = node.bin(id)
324 except TypeError:
324 except TypeError:
325 ui.warn(_('skipping incorrectly formatted id %s\n')
325 ui.warn(_('skipping incorrectly formatted id %s\n')
326 % id)
326 % id)
327 continue
327 continue
328 try:
328 try:
329 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
329 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
330 name))
330 name))
331 except KeyError:
331 except KeyError:
332 ui.warn(_('no mapping for id %s\n') % id)
332 ui.warn(_('no mapping for id %s\n') % id)
333 continue
333 continue
334 return ''.join(newdata)
334 return ''.join(newdata)
335
335
336 def _islfile(file, ctx, matcher, size):
336 def _islfile(file, ctx, matcher, size):
337 '''Return true if file should be considered a largefile, i.e.
337 '''Return true if file should be considered a largefile, i.e.
338 matcher matches it or it is larger than size.'''
338 matcher matches it or it is larger than size.'''
339 # never store special .hg* files as largefiles
339 # never store special .hg* files as largefiles
340 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
340 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
341 return False
341 return False
342 if matcher and matcher(file):
342 if matcher and matcher(file):
343 return True
343 return True
344 try:
344 try:
345 return ctx.filectx(file).size() >= size * 1024 * 1024
345 return ctx.filectx(file).size() >= size * 1024 * 1024
346 except error.LookupError:
346 except error.LookupError:
347 return False
347 return False
348
348
349 def uploadlfiles(ui, rsrc, rdst, files):
349 def uploadlfiles(ui, rsrc, rdst, files):
350 '''upload largefiles to the central store'''
350 '''upload largefiles to the central store'''
351
351
352 if not files:
352 if not files:
353 return
353 return
354
354
355 store = basestore._openstore(rsrc, rdst, put=True)
355 store = basestore._openstore(rsrc, rdst, put=True)
356
356
357 at = 0
357 at = 0
358 ui.debug("sending statlfile command for %d largefiles\n" % len(files))
358 ui.debug("sending statlfile command for %d largefiles\n" % len(files))
359 retval = store.exists(files)
359 retval = store.exists(files)
360 files = filter(lambda h: not retval[h], files)
360 files = filter(lambda h: not retval[h], files)
361 ui.debug("%d largefiles need to be uploaded\n" % len(files))
361 ui.debug("%d largefiles need to be uploaded\n" % len(files))
362
362
363 for hash in files:
363 for hash in files:
364 ui.progress(_('uploading largefiles'), at, unit='largefile',
364 ui.progress(_('uploading largefiles'), at, unit='largefile',
365 total=len(files))
365 total=len(files))
366 source = lfutil.findfile(rsrc, hash)
366 source = lfutil.findfile(rsrc, hash)
367 if not source:
367 if not source:
368 raise util.Abort(_('largefile %s missing from store'
368 raise util.Abort(_('largefile %s missing from store'
369 ' (needs to be uploaded)') % hash)
369 ' (needs to be uploaded)') % hash)
370 # XXX check for errors here
370 # XXX check for errors here
371 store.put(source, hash)
371 store.put(source, hash)
372 at += 1
372 at += 1
373 ui.progress(_('uploading largefiles'), None)
373 ui.progress(_('uploading largefiles'), None)
374
374
375 def verifylfiles(ui, repo, all=False, contents=False):
375 def verifylfiles(ui, repo, all=False, contents=False):
376 '''Verify that every largefile revision in the current changeset
376 '''Verify that every largefile revision in the current changeset
377 exists in the central store. With --contents, also verify that
377 exists in the central store. With --contents, also verify that
378 the contents of each local largefile file revision are correct (SHA-1 hash
378 the contents of each local largefile file revision are correct (SHA-1 hash
379 matches the revision ID). With --all, check every changeset in
379 matches the revision ID). With --all, check every changeset in
380 this repository.'''
380 this repository.'''
381 if all:
381 if all:
382 # Pass a list to the function rather than an iterator because we know a
382 # Pass a list to the function rather than an iterator because we know a
383 # list will work.
383 # list will work.
384 revs = range(len(repo))
384 revs = range(len(repo))
385 else:
385 else:
386 revs = ['.']
386 revs = ['.']
387
387
388 store = basestore._openstore(repo)
388 store = basestore._openstore(repo)
389 return store.verify(revs, contents=contents)
389 return store.verify(revs, contents=contents)
390
390
391 def cachelfiles(ui, repo, node, filelist=None):
391 def cachelfiles(ui, repo, node, filelist=None):
392 '''cachelfiles ensures that all largefiles needed by the specified revision
392 '''cachelfiles ensures that all largefiles needed by the specified revision
393 are present in the repository's largefile cache.
393 are present in the repository's largefile cache.
394
394
395 returns a tuple (cached, missing). cached is the list of files downloaded
395 returns a tuple (cached, missing). cached is the list of files downloaded
396 by this operation; missing is the list of files that were needed but could
396 by this operation; missing is the list of files that were needed but could
397 not be found.'''
397 not be found.'''
398 lfiles = lfutil.listlfiles(repo, node)
398 lfiles = lfutil.listlfiles(repo, node)
399 if filelist:
399 if filelist:
400 lfiles = set(lfiles) & set(filelist)
400 lfiles = set(lfiles) & set(filelist)
401 toget = []
401 toget = []
402
402
403 for lfile in lfiles:
403 for lfile in lfiles:
404 try:
404 try:
405 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
405 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
406 except IOError, err:
406 except IOError, err:
407 if err.errno == errno.ENOENT:
407 if err.errno == errno.ENOENT:
408 continue # node must be None and standin wasn't found in wctx
408 continue # node must be None and standin wasn't found in wctx
409 raise
409 raise
410 if not lfutil.findfile(repo, expectedhash):
410 if not lfutil.findfile(repo, expectedhash):
411 toget.append((lfile, expectedhash))
411 toget.append((lfile, expectedhash))
412
412
413 if toget:
413 if toget:
414 store = basestore._openstore(repo)
414 store = basestore._openstore(repo)
415 ret = store.get(toget)
415 ret = store.get(toget)
416 return ret
416 return ret
417
417
418 return ([], [])
418 return ([], [])
419
419
420 def downloadlfiles(ui, repo, rev=None):
420 def downloadlfiles(ui, repo, rev=None):
421 matchfn = scmutil.match(repo[None],
421 matchfn = scmutil.match(repo[None],
422 [repo.wjoin(lfutil.shortname)], {})
422 [repo.wjoin(lfutil.shortname)], {})
423 def prepare(ctx, fns):
423 def prepare(ctx, fns):
424 pass
424 pass
425 totalsuccess = 0
425 totalsuccess = 0
426 totalmissing = 0
426 totalmissing = 0
427 if rev != []: # walkchangerevs on empty list would return all revs
427 if rev != []: # walkchangerevs on empty list would return all revs
428 for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
428 for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
429 prepare):
429 prepare):
430 success, missing = cachelfiles(ui, repo, ctx.node())
430 success, missing = cachelfiles(ui, repo, ctx.node())
431 totalsuccess += len(success)
431 totalsuccess += len(success)
432 totalmissing += len(missing)
432 totalmissing += len(missing)
433 ui.status(_("%d additional largefiles cached\n") % totalsuccess)
433 ui.status(_("%d additional largefiles cached\n") % totalsuccess)
434 if totalmissing > 0:
434 if totalmissing > 0:
435 ui.status(_("%d largefiles failed to download\n") % totalmissing)
435 ui.status(_("%d largefiles failed to download\n") % totalmissing)
436 return totalsuccess, totalmissing
436 return totalsuccess, totalmissing
437
437
438 def updatelfiles(ui, repo, filelist=None, printmessage=True,
438 def updatelfiles(ui, repo, filelist=None, printmessage=None,
439 normallookup=False):
439 normallookup=False):
440 '''Update largefiles according to standins in the working directory
441
442 If ``printmessage`` is other than ``None``, it means "print (or
443 ignore, for false) message forcibly".
444 '''
445 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
440 wlock = repo.wlock()
446 wlock = repo.wlock()
441 try:
447 try:
442 lfdirstate = lfutil.openlfdirstate(ui, repo)
448 lfdirstate = lfutil.openlfdirstate(ui, repo)
443 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
449 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
444
450
445 if filelist is not None:
451 if filelist is not None:
446 filelist = set(filelist)
452 filelist = set(filelist)
447 lfiles = [f for f in lfiles if f in filelist]
453 lfiles = [f for f in lfiles if f in filelist]
448
454
449 update = {}
455 update = {}
450 updated, removed = 0, 0
456 updated, removed = 0, 0
451 for lfile in lfiles:
457 for lfile in lfiles:
452 abslfile = repo.wjoin(lfile)
458 abslfile = repo.wjoin(lfile)
453 absstandin = repo.wjoin(lfutil.standin(lfile))
459 absstandin = repo.wjoin(lfutil.standin(lfile))
454 if os.path.exists(absstandin):
460 if os.path.exists(absstandin):
455 if (os.path.exists(absstandin + '.orig') and
461 if (os.path.exists(absstandin + '.orig') and
456 os.path.exists(abslfile)):
462 os.path.exists(abslfile)):
457 shutil.copyfile(abslfile, abslfile + '.orig')
463 shutil.copyfile(abslfile, abslfile + '.orig')
458 util.unlinkpath(absstandin + '.orig')
464 util.unlinkpath(absstandin + '.orig')
459 expecthash = lfutil.readstandin(repo, lfile)
465 expecthash = lfutil.readstandin(repo, lfile)
460 if (expecthash != '' and
466 if (expecthash != '' and
461 (not os.path.exists(abslfile) or
467 (not os.path.exists(abslfile) or
462 expecthash != lfutil.hashfile(abslfile))):
468 expecthash != lfutil.hashfile(abslfile))):
463 if lfile not in repo[None]: # not switched to normal file
469 if lfile not in repo[None]: # not switched to normal file
464 util.unlinkpath(abslfile, ignoremissing=True)
470 util.unlinkpath(abslfile, ignoremissing=True)
465 # use normallookup() to allocate an entry in largefiles
471 # use normallookup() to allocate an entry in largefiles
466 # dirstate, because lack of it misleads
472 # dirstate, because lack of it misleads
467 # lfilesrepo.status() into recognition that such cache
473 # lfilesrepo.status() into recognition that such cache
468 # missing files are removed.
474 # missing files are removed.
469 lfdirstate.normallookup(lfile)
475 lfdirstate.normallookup(lfile)
470 update[lfile] = expecthash
476 update[lfile] = expecthash
471 else:
477 else:
472 # Remove lfiles for which the standin is deleted, unless the
478 # Remove lfiles for which the standin is deleted, unless the
473 # lfile is added to the repository again. This happens when a
479 # lfile is added to the repository again. This happens when a
474 # largefile is converted back to a normal file: the standin
480 # largefile is converted back to a normal file: the standin
475 # disappears, but a new (normal) file appears as the lfile.
481 # disappears, but a new (normal) file appears as the lfile.
476 if (os.path.exists(abslfile) and
482 if (os.path.exists(abslfile) and
477 repo.dirstate.normalize(lfile) not in repo[None]):
483 repo.dirstate.normalize(lfile) not in repo[None]):
478 util.unlinkpath(abslfile)
484 util.unlinkpath(abslfile)
479 removed += 1
485 removed += 1
480
486
481 # largefile processing might be slow and be interrupted - be prepared
487 # largefile processing might be slow and be interrupted - be prepared
482 lfdirstate.write()
488 lfdirstate.write()
483
489
484 if lfiles:
490 if lfiles:
485 if printmessage:
491 statuswriter(_('getting changed largefiles\n'))
486 ui.status(_('getting changed largefiles\n'))
487 cachelfiles(ui, repo, None, lfiles)
492 cachelfiles(ui, repo, None, lfiles)
488
493
489 for lfile in lfiles:
494 for lfile in lfiles:
490 update1 = 0
495 update1 = 0
491
496
492 expecthash = update.get(lfile)
497 expecthash = update.get(lfile)
493 if expecthash:
498 if expecthash:
494 if not lfutil.copyfromcache(repo, expecthash, lfile):
499 if not lfutil.copyfromcache(repo, expecthash, lfile):
495 # failed ... but already removed and set to normallookup
500 # failed ... but already removed and set to normallookup
496 continue
501 continue
497 # Synchronize largefile dirstate to the last modified
502 # Synchronize largefile dirstate to the last modified
498 # time of the file
503 # time of the file
499 lfdirstate.normal(lfile)
504 lfdirstate.normal(lfile)
500 update1 = 1
505 update1 = 1
501
506
502 # copy the state of largefile standin from the repository's
507 # copy the state of largefile standin from the repository's
503 # dirstate to its state in the lfdirstate.
508 # dirstate to its state in the lfdirstate.
504 abslfile = repo.wjoin(lfile)
509 abslfile = repo.wjoin(lfile)
505 absstandin = repo.wjoin(lfutil.standin(lfile))
510 absstandin = repo.wjoin(lfutil.standin(lfile))
506 if os.path.exists(absstandin):
511 if os.path.exists(absstandin):
507 mode = os.stat(absstandin).st_mode
512 mode = os.stat(absstandin).st_mode
508 if mode != os.stat(abslfile).st_mode:
513 if mode != os.stat(abslfile).st_mode:
509 os.chmod(abslfile, mode)
514 os.chmod(abslfile, mode)
510 update1 = 1
515 update1 = 1
511
516
512 updated += update1
517 updated += update1
513
518
514 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
519 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
515
520
516 if filelist is not None:
521 if filelist is not None:
517 # If "local largefile" is chosen at file merging, it is
522 # If "local largefile" is chosen at file merging, it is
518 # not listed in "filelist" (= dirstate syncing is
523 # not listed in "filelist" (= dirstate syncing is
519 # omitted), because the standin file is not changed before and
524 # omitted), because the standin file is not changed before and
520 # after merging.
525 # after merging.
521 # But the status of such files may have to be changed by
526 # But the status of such files may have to be changed by
522 # merging. For example, locally modified ("M") largefile
527 # merging. For example, locally modified ("M") largefile
523 # has to become re-added("A"), if it is "normal" file in
528 # has to become re-added("A"), if it is "normal" file in
524 # the target revision of linear-merging.
529 # the target revision of linear-merging.
525 for lfile in lfdirstate:
530 for lfile in lfdirstate:
526 if lfile not in filelist:
531 if lfile not in filelist:
527 lfutil.synclfdirstate(repo, lfdirstate, lfile, True)
532 lfutil.synclfdirstate(repo, lfdirstate, lfile, True)
528
533
529 lfdirstate.write()
534 lfdirstate.write()
530 if printmessage and lfiles:
535 if lfiles:
531 ui.status(_('%d largefiles updated, %d removed\n') % (updated,
536 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
532 removed))
537 removed))
533 finally:
538 finally:
534 wlock.release()
539 wlock.release()
535
540
536 @command('lfpull',
541 @command('lfpull',
537 [('r', 'rev', [], _('pull largefiles for these revisions'))
542 [('r', 'rev', [], _('pull largefiles for these revisions'))
538 ] + commands.remoteopts,
543 ] + commands.remoteopts,
539 _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
544 _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
540 def lfpull(ui, repo, source="default", **opts):
545 def lfpull(ui, repo, source="default", **opts):
541 """pull largefiles for the specified revisions from the specified source
546 """pull largefiles for the specified revisions from the specified source
542
547
543 Pull largefiles that are referenced from local changesets but missing
548 Pull largefiles that are referenced from local changesets but missing
544 locally, pulling from a remote repository to the local cache.
549 locally, pulling from a remote repository to the local cache.
545
550
546 If SOURCE is omitted, the 'default' path will be used.
551 If SOURCE is omitted, the 'default' path will be used.
547 See :hg:`help urls` for more information.
552 See :hg:`help urls` for more information.
548
553
549 .. container:: verbose
554 .. container:: verbose
550
555
551 Some examples:
556 Some examples:
552
557
553 - pull largefiles for all branch heads::
558 - pull largefiles for all branch heads::
554
559
555 hg lfpull -r "head() and not closed()"
560 hg lfpull -r "head() and not closed()"
556
561
557 - pull largefiles on the default branch::
562 - pull largefiles on the default branch::
558
563
559 hg lfpull -r "branch(default)"
564 hg lfpull -r "branch(default)"
560 """
565 """
561 repo.lfpullsource = source
566 repo.lfpullsource = source
562
567
563 revs = opts.get('rev', [])
568 revs = opts.get('rev', [])
564 if not revs:
569 if not revs:
565 raise util.Abort(_('no revisions specified'))
570 raise util.Abort(_('no revisions specified'))
566 revs = scmutil.revrange(repo, revs)
571 revs = scmutil.revrange(repo, revs)
567
572
568 numcached = 0
573 numcached = 0
569 for rev in revs:
574 for rev in revs:
570 ui.note(_('pulling largefiles for revision %s\n') % rev)
575 ui.note(_('pulling largefiles for revision %s\n') % rev)
571 (cached, missing) = cachelfiles(ui, repo, rev)
576 (cached, missing) = cachelfiles(ui, repo, rev)
572 numcached += len(cached)
577 numcached += len(cached)
573 ui.status(_("%d largefiles cached\n") % numcached)
578 ui.status(_("%d largefiles cached\n") % numcached)
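For callers of lfcommands.updatelfiles() the visible change is the new default printmessage=None: by default the status writer decides whether to print, while an explicit True or False still forces or suppresses the "getting changed largefiles" and "N largefiles updated, N removed" messages. A hedged usage sketch follows; the wrapper function and its arguments are hypothetical, only the updatelfiles() signature comes from the diff above.

import lfcommands

def refreshstandins(ui, repo, quiet=False):
    # hypothetical caller, shown only to contrast the printmessage modes
    if quiet:
        # suppress the largefiles status lines entirely
        lfcommands.updatelfiles(ui, repo, printmessage=False)
    else:
        # printmessage defaults to None: let getstatuswriter() pick the writer
        lfcommands.updatelfiles(ui, repo)
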
@@ -1,1301 +1,1303 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset
15 archival, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18
18
19 import lfutil
19 import lfutil
20 import lfcommands
20 import lfcommands
21 import basestore
21 import basestore
22
22
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24
24
25 def installnormalfilesmatchfn(manifest):
25 def installnormalfilesmatchfn(manifest):
26 '''installmatchfn with a matchfn that ignores all largefiles'''
26 '''installmatchfn with a matchfn that ignores all largefiles'''
27 def overridematch(ctx, pats=[], opts={}, globbed=False,
27 def overridematch(ctx, pats=[], opts={}, globbed=False,
28 default='relpath'):
28 default='relpath'):
29 match = oldmatch(ctx, pats, opts, globbed, default)
29 match = oldmatch(ctx, pats, opts, globbed, default)
30 m = copy.copy(match)
30 m = copy.copy(match)
31 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
31 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
32 manifest)
32 manifest)
33 m._files = filter(notlfile, m._files)
33 m._files = filter(notlfile, m._files)
34 m._fmap = set(m._files)
34 m._fmap = set(m._files)
35 m._always = False
35 m._always = False
36 origmatchfn = m.matchfn
36 origmatchfn = m.matchfn
37 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
37 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
38 return m
38 return m
39 oldmatch = installmatchfn(overridematch)
39 oldmatch = installmatchfn(overridematch)
40
40
41 def installmatchfn(f):
41 def installmatchfn(f):
42 '''monkey patch the scmutil module with a custom match function.
42 '''monkey patch the scmutil module with a custom match function.
43 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
43 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
44 oldmatch = scmutil.match
44 oldmatch = scmutil.match
45 setattr(f, 'oldmatch', oldmatch)
45 setattr(f, 'oldmatch', oldmatch)
46 scmutil.match = f
46 scmutil.match = f
47 return oldmatch
47 return oldmatch
48
48
49 def restorematchfn():
49 def restorematchfn():
50 '''restores scmutil.match to what it was before installmatchfn
50 '''restores scmutil.match to what it was before installmatchfn
51 was called. no-op if scmutil.match is its original function.
51 was called. no-op if scmutil.match is its original function.
52
52
53 Note that n calls to installmatchfn will require n calls to
53 Note that n calls to installmatchfn will require n calls to
54 restore matchfn to reverse'''
54 restore matchfn to reverse'''
55 scmutil.match = getattr(scmutil.match, 'oldmatch')
55 scmutil.match = getattr(scmutil.match, 'oldmatch')
56
56
57 def installmatchandpatsfn(f):
57 def installmatchandpatsfn(f):
58 oldmatchandpats = scmutil.matchandpats
58 oldmatchandpats = scmutil.matchandpats
59 setattr(f, 'oldmatchandpats', oldmatchandpats)
59 setattr(f, 'oldmatchandpats', oldmatchandpats)
60 scmutil.matchandpats = f
60 scmutil.matchandpats = f
61 return oldmatchandpats
61 return oldmatchandpats
62
62
63 def restorematchandpatsfn():
63 def restorematchandpatsfn():
64 '''restores scmutil.matchandpats to what it was before
64 '''restores scmutil.matchandpats to what it was before
65 installmatchandpatsfn was called. No-op if scmutil.matchandpats
65 installmatchandpatsfn was called. No-op if scmutil.matchandpats
66 is its original function.
66 is its original function.
67
67
68 Note that n calls to installmatchandpatsfn will require n calls
68 Note that n calls to installmatchandpatsfn will require n calls
69 to restore matchfn to reverse'''
69 to restore matchfn to reverse'''
70 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
70 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
71 scmutil.matchandpats)
71 scmutil.matchandpats)
72
72
73 def addlargefiles(ui, repo, *pats, **opts):
73 def addlargefiles(ui, repo, *pats, **opts):
74 large = opts.pop('large', None)
74 large = opts.pop('large', None)
75 lfsize = lfutil.getminsize(
75 lfsize = lfutil.getminsize(
76 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
76 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
77
77
78 lfmatcher = None
78 lfmatcher = None
79 if lfutil.islfilesrepo(repo):
79 if lfutil.islfilesrepo(repo):
80 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
80 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
81 if lfpats:
81 if lfpats:
82 lfmatcher = match_.match(repo.root, '', list(lfpats))
82 lfmatcher = match_.match(repo.root, '', list(lfpats))
83
83
84 lfnames = []
84 lfnames = []
85 m = scmutil.match(repo[None], pats, opts)
85 m = scmutil.match(repo[None], pats, opts)
86 m.bad = lambda x, y: None
86 m.bad = lambda x, y: None
87 wctx = repo[None]
87 wctx = repo[None]
88 for f in repo.walk(m):
88 for f in repo.walk(m):
89 exact = m.exact(f)
89 exact = m.exact(f)
90 lfile = lfutil.standin(f) in wctx
90 lfile = lfutil.standin(f) in wctx
91 nfile = f in wctx
91 nfile = f in wctx
92 exists = lfile or nfile
92 exists = lfile or nfile
93
93
94 # Don't warn the user when they attempt to add a normal tracked file.
94 # Don't warn the user when they attempt to add a normal tracked file.
95 # The normal add code will do that for us.
95 # The normal add code will do that for us.
96 if exact and exists:
96 if exact and exists:
97 if lfile:
97 if lfile:
98 ui.warn(_('%s already a largefile\n') % f)
98 ui.warn(_('%s already a largefile\n') % f)
99 continue
99 continue
100
100
101 if (exact or not exists) and not lfutil.isstandin(f):
101 if (exact or not exists) and not lfutil.isstandin(f):
102 wfile = repo.wjoin(f)
102 wfile = repo.wjoin(f)
103
103
104 # In case the file was removed previously, but not committed
104 # In case the file was removed previously, but not committed
105 # (issue3507)
105 # (issue3507)
106 if not os.path.exists(wfile):
106 if not os.path.exists(wfile):
107 continue
107 continue
108
108
109 abovemin = (lfsize and
109 abovemin = (lfsize and
110 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
110 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
111 if large or abovemin or (lfmatcher and lfmatcher(f)):
111 if large or abovemin or (lfmatcher and lfmatcher(f)):
112 lfnames.append(f)
112 lfnames.append(f)
113 if ui.verbose or not exact:
113 if ui.verbose or not exact:
114 ui.status(_('adding %s as a largefile\n') % m.rel(f))
114 ui.status(_('adding %s as a largefile\n') % m.rel(f))
115
115
116 bad = []
116 bad = []
117
117
118 # Need to lock, otherwise there could be a race condition between
118 # Need to lock, otherwise there could be a race condition between
119 # when standins are created and added to the repo.
119 # when standins are created and added to the repo.
120 wlock = repo.wlock()
120 wlock = repo.wlock()
121 try:
121 try:
122 if not opts.get('dry_run'):
122 if not opts.get('dry_run'):
123 standins = []
123 standins = []
124 lfdirstate = lfutil.openlfdirstate(ui, repo)
124 lfdirstate = lfutil.openlfdirstate(ui, repo)
125 for f in lfnames:
125 for f in lfnames:
126 standinname = lfutil.standin(f)
126 standinname = lfutil.standin(f)
127 lfutil.writestandin(repo, standinname, hash='',
127 lfutil.writestandin(repo, standinname, hash='',
128 executable=lfutil.getexecutable(repo.wjoin(f)))
128 executable=lfutil.getexecutable(repo.wjoin(f)))
129 standins.append(standinname)
129 standins.append(standinname)
130 if lfdirstate[f] == 'r':
130 if lfdirstate[f] == 'r':
131 lfdirstate.normallookup(f)
131 lfdirstate.normallookup(f)
132 else:
132 else:
133 lfdirstate.add(f)
133 lfdirstate.add(f)
134 lfdirstate.write()
134 lfdirstate.write()
135 bad += [lfutil.splitstandin(f)
135 bad += [lfutil.splitstandin(f)
136 for f in repo[None].add(standins)
136 for f in repo[None].add(standins)
137 if f in m.files()]
137 if f in m.files()]
138 finally:
138 finally:
139 wlock.release()
139 wlock.release()
140 return bad
140 return bad
141
141
142 def removelargefiles(ui, repo, isaddremove, *pats, **opts):
142 def removelargefiles(ui, repo, isaddremove, *pats, **opts):
143 after = opts.get('after')
143 after = opts.get('after')
144 if not pats and not after:
144 if not pats and not after:
145 raise util.Abort(_('no files specified'))
145 raise util.Abort(_('no files specified'))
146 m = scmutil.match(repo[None], pats, opts)
146 m = scmutil.match(repo[None], pats, opts)
147 try:
147 try:
148 repo.lfstatus = True
148 repo.lfstatus = True
149 s = repo.status(match=m, clean=True)
149 s = repo.status(match=m, clean=True)
150 finally:
150 finally:
151 repo.lfstatus = False
151 repo.lfstatus = False
152 manifest = repo[None].manifest()
152 manifest = repo[None].manifest()
153 modified, added, deleted, clean = [[f for f in list
153 modified, added, deleted, clean = [[f for f in list
154 if lfutil.standin(f) in manifest]
154 if lfutil.standin(f) in manifest]
155 for list in (s.modified, s.added,
155 for list in (s.modified, s.added,
156 s.deleted, s.clean)]
156 s.deleted, s.clean)]
157
157
158 def warn(files, msg):
158 def warn(files, msg):
159 for f in files:
159 for f in files:
160 ui.warn(msg % m.rel(f))
160 ui.warn(msg % m.rel(f))
161 return int(len(files) > 0)
161 return int(len(files) > 0)
162
162
163 result = 0
163 result = 0
164
164
165 if after:
165 if after:
166 remove = deleted
166 remove = deleted
167 result = warn(modified + added + clean,
167 result = warn(modified + added + clean,
168 _('not removing %s: file still exists\n'))
168 _('not removing %s: file still exists\n'))
169 else:
169 else:
170 remove = deleted + clean
170 remove = deleted + clean
171 result = warn(modified, _('not removing %s: file is modified (use -f'
171 result = warn(modified, _('not removing %s: file is modified (use -f'
172 ' to force removal)\n'))
172 ' to force removal)\n'))
173 result = warn(added, _('not removing %s: file has been marked for add'
173 result = warn(added, _('not removing %s: file has been marked for add'
174 ' (use forget to undo)\n')) or result
174 ' (use forget to undo)\n')) or result
175
175
176 for f in sorted(remove):
176 for f in sorted(remove):
177 if ui.verbose or not m.exact(f):
177 if ui.verbose or not m.exact(f):
178 ui.status(_('removing %s\n') % m.rel(f))
178 ui.status(_('removing %s\n') % m.rel(f))
179
179
180 # Need to lock because standin files are deleted then removed from the
180 # Need to lock because standin files are deleted then removed from the
181 # repository and we could race in-between.
181 # repository and we could race in-between.
182 wlock = repo.wlock()
182 wlock = repo.wlock()
183 try:
183 try:
184 lfdirstate = lfutil.openlfdirstate(ui, repo)
184 lfdirstate = lfutil.openlfdirstate(ui, repo)
185 for f in remove:
185 for f in remove:
186 if not after:
186 if not after:
187 # If this is being called by addremove, notify the user that we
187 # If this is being called by addremove, notify the user that we
188 # are removing the file.
188 # are removing the file.
189 if isaddremove:
189 if isaddremove:
190 ui.status(_('removing %s\n') % f)
190 ui.status(_('removing %s\n') % f)
191 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
191 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
192 lfdirstate.remove(f)
192 lfdirstate.remove(f)
193 lfdirstate.write()
193 lfdirstate.write()
194 remove = [lfutil.standin(f) for f in remove]
194 remove = [lfutil.standin(f) for f in remove]
195 # If this is being called by addremove, let the original addremove
195 # If this is being called by addremove, let the original addremove
196 # function handle this.
196 # function handle this.
197 if not isaddremove:
197 if not isaddremove:
198 for f in remove:
198 for f in remove:
199 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
199 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
200 repo[None].forget(remove)
200 repo[None].forget(remove)
201 finally:
201 finally:
202 wlock.release()
202 wlock.release()
203
203
204 return result
204 return result
205
205
206 # For overriding mercurial.hgweb.webcommands so that largefiles will
206 # For overriding mercurial.hgweb.webcommands so that largefiles will
207 # appear at their right place in the manifests.
207 # appear at their right place in the manifests.
208 def decodepath(orig, path):
208 def decodepath(orig, path):
209 return lfutil.splitstandin(path) or path
209 return lfutil.splitstandin(path) or path
210
210
211 # -- Wrappers: modify existing commands --------------------------------
211 # -- Wrappers: modify existing commands --------------------------------
212
212
213 # Add works by going through the files that the user wanted to add and
213 # Add works by going through the files that the user wanted to add and
214 # checking if they should be added as largefiles. Then it makes a new
214 # checking if they should be added as largefiles. Then it makes a new
215 # matcher which matches only the normal files and runs the original
215 # matcher which matches only the normal files and runs the original
216 # version of add.
216 # version of add.
217 def overrideadd(orig, ui, repo, *pats, **opts):
217 def overrideadd(orig, ui, repo, *pats, **opts):
218 normal = opts.pop('normal')
218 normal = opts.pop('normal')
219 if normal:
219 if normal:
220 if opts.get('large'):
220 if opts.get('large'):
221 raise util.Abort(_('--normal cannot be used with --large'))
221 raise util.Abort(_('--normal cannot be used with --large'))
222 return orig(ui, repo, *pats, **opts)
222 return orig(ui, repo, *pats, **opts)
223 bad = addlargefiles(ui, repo, *pats, **opts)
223 bad = addlargefiles(ui, repo, *pats, **opts)
224 installnormalfilesmatchfn(repo[None].manifest())
224 installnormalfilesmatchfn(repo[None].manifest())
225 result = orig(ui, repo, *pats, **opts)
225 result = orig(ui, repo, *pats, **opts)
226 restorematchfn()
226 restorematchfn()
227
227
228 return (result == 1 or bad) and 1 or 0
228 return (result == 1 or bad) and 1 or 0
229
229
230 def overrideremove(orig, ui, repo, *pats, **opts):
230 def overrideremove(orig, ui, repo, *pats, **opts):
231 installnormalfilesmatchfn(repo[None].manifest())
231 installnormalfilesmatchfn(repo[None].manifest())
232 result = orig(ui, repo, *pats, **opts)
232 result = orig(ui, repo, *pats, **opts)
233 restorematchfn()
233 restorematchfn()
234 return removelargefiles(ui, repo, False, *pats, **opts) or result
234 return removelargefiles(ui, repo, False, *pats, **opts) or result
235
235
236 def overridestatusfn(orig, repo, rev2, **opts):
236 def overridestatusfn(orig, repo, rev2, **opts):
237 try:
237 try:
238 repo._repo.lfstatus = True
238 repo._repo.lfstatus = True
239 return orig(repo, rev2, **opts)
239 return orig(repo, rev2, **opts)
240 finally:
240 finally:
241 repo._repo.lfstatus = False
241 repo._repo.lfstatus = False
242
242
243 def overridestatus(orig, ui, repo, *pats, **opts):
243 def overridestatus(orig, ui, repo, *pats, **opts):
244 try:
244 try:
245 repo.lfstatus = True
245 repo.lfstatus = True
246 return orig(ui, repo, *pats, **opts)
246 return orig(ui, repo, *pats, **opts)
247 finally:
247 finally:
248 repo.lfstatus = False
248 repo.lfstatus = False
249
249
250 def overridedirty(orig, repo, ignoreupdate=False):
250 def overridedirty(orig, repo, ignoreupdate=False):
251 try:
251 try:
252 repo._repo.lfstatus = True
252 repo._repo.lfstatus = True
253 return orig(repo, ignoreupdate)
253 return orig(repo, ignoreupdate)
254 finally:
254 finally:
255 repo._repo.lfstatus = False
255 repo._repo.lfstatus = False
256
256
def overridelog(orig, ui, repo, *pats, **opts):
    def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)
        # TODO: handling of patterns in both cases below
        if m._cwd:
            if os.path.isabs(m._cwd):
                # TODO: handle largefile magic when invoked from other cwd
                return matchandpats
            back = (m._cwd.count('/') + 1) * '../'
            pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
        else:
            pats.update(lfutil.standin(f) for f in p)

        for i in range(0, len(m._files)):
            standin = lfutil.standin(m._files[i])
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()]:
                m._files.append(standin)
                pats.add(standin)

        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts):
        pctx = repo[None]
        match, pats = oldmatchandpats(pctx, pats, opts)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)

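# Illustrative sketch, not part of the original extension: the matcher
# juggling above relies on the standin naming convention, where each
# largefile 'foo/bar.bin' is tracked through a small standin file under the
# '.hglf/' directory.  The helper below is an assumption for illustration;
# the authoritative implementations are lfutil.standin() and
# lfutil.splitstandin().
def _examplestandinroundtrip(filename):
    # e.g. 'foo/bar.bin' -> '.hglf/foo/bar.bin' -> 'foo/bar.bin'
    standin = lfutil.standin(filename)
    assert lfutil.splitstandin(standin) == filename
    return standin
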
def overrideverify(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result

def overridedebugstate(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    if large:
        class fakerepo(object):
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)

# Override needs to refresh standins so that update's normal merge
# will go through properly. Then the other update hook (overriding repo.update)
# will get the new files. Filemerge is also overridden so that the merge
# will merge standins correctly.
def overrideupdate(orig, ui, repo, *pats, **opts):
    # Need to lock between the standins getting updated and their
    # largefiles getting updated
    wlock = repo.wlock()
    try:
        if opts['check']:
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            unsure, s = lfdirstate.status(
                match_.always(repo.root, repo.getcwd()),
                [], False, False, False)

            mod = len(s.modified) > 0
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted changes'))
        return orig(ui, repo, *pats, **opts)
    finally:
        wlock.release()

# Before starting the manifest merge, merge.updates will call
# _checkunknown to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden manifestmerge function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f)

# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original manifestmerge and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             partial, acceptremote, followcopies):
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
                     acceptremote, followcopies)

    if overwrite:
        return actions

    removes = set(a[0] for a in actions['r'])

    newglist = []
    lfmr = [] # LargeFiles: Mark as Removed
    for action in actions['g']:
        f, args, msg = action
        splitstandin = f and lfutil.splitstandin(f)
        if (splitstandin is not None and
            splitstandin in p1 and splitstandin not in removes):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('remote turned local normal file %s into a largefile\n'
                    'use (l)argefile or keep (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                actions['r'].append((lfile, None, msg))
                newglist.append((standin, (p2.flags(standin),), msg))
            else:
                actions['r'].append((standin, None, msg))
        elif lfutil.standin(f) in p1 and lfutil.standin(f) not in removes:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('remote turned local largefile %s into a normal file\n'
                    'keep (l)argefile or use (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions['r'].append((lfile, None, msg))
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    lfmr.append((lfile, None, msg))

                    # linear-merge should treat this largefile as 're-added'
                    actions['a'].append((standin, None, msg))
            else:
                actions['r'].append((standin, None, msg))
                newglist.append((lfile, (p2.flags(lfile),), msg))
        else:
            newglist.append(action)

    newglist.sort()
    actions['g'] = newglist
    if lfmr:
        lfmr.sort()
        actions['lfmr'] = lfmr

    return actions

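# Illustrative sketch, not part of the original extension: the actions
# structure rewritten above maps single-letter action codes to lists of
# (filename, args, message) tuples ('g' = get, 'r' = remove, 'a' = add, plus
# the largefiles-specific 'lfmr' bucket introduced here).  The literal below
# is a made-up example of that shape; the concrete filenames and messages are
# assumptions for illustration only.
def _exampleactions():
    return {
        'g': [('.hglf/big.bin', ('',), 'remote created largefile')],
        'r': [('big.bin', None, 'normal file replaced by largefile')],
        'a': [],
        'lfmr': [],
    }
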
def mergerecordupdates(orig, repo, actions, branchmerge):
    if 'lfmr' in actions:
        # this should be executed before 'orig', to execute 'remove'
        # before all other actions
        for lfile, args, msg in actions['lfmr']:
            repo.dirstate.remove(lfile)

    return orig(repo, actions, branchmerge)


# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)

    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
               (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0

# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile; the replacement checks
# whether the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        try:
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                origmatchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       (f in manifest) and
                                       origmatchfn(lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                        # The file is gone, but this deletes any empty parent
                        # directories as a side-effect.
                        util.unlinkpath(repo.wjoin(srclfile), True)
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))

                    lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result

# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
def overriderevert(orig, ui, repo, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in s.deleted:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(ctx, pats=[], opts={}, globbed=False,
                default='relpath'):
            match = oldmatch(ctx, pats, opts, globbed, default)
            m = copy.copy(match)
            def tostandin(f):
                if lfutil.standin(f) in ctx:
                    return lfutil.standin(f)
                elif lfutil.standin(f) in repo[None]:
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fmap = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in repo[None] or f in ctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

    finally:
        wlock.release()

# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result

def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that have just been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull --lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull --lfrev "head(pulled()) and not closed()"

    """

    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise util.Abort(_("pulled() only available in --lfrev"))
    return revset.baseset([r for r in subset if r >= firstpulled])

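# Illustrative sketch, not part of the original extension: a revset predicate
# such as pulled() only becomes visible to scmutil.revrange() above after it
# is registered in Mercurial's revset symbol table.  The registration call
# below is an assumption for illustration; the real hookup happens in the
# extension's setup code.
def _exampleregisterpulled():
    from mercurial import revset as revsetmod
    revsetmod.symbols['pulled'] = pulledrevsetsymbol
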
def overrideclone(orig, ui, source, dest=None, **opts):
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise util.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            d)

    return orig(ui, source, dest, **opts)

def hgclone(orig, ui, opts, *args, **kwargs):
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt it whether or not this is a largefile repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            if missing != 0:
                return None

    return result

def overriderebase(orig, ui, repo, **opts):
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._isrebasing = True
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._isrebasing = False
        repo._lfcommithooks.pop()

def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=None):
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub.archive(repo.ui, archiver, prefix, submatch)

    archiver.done()

def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    lfcommands.cachelfiles(ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo._repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(os.path.join(prefix, path), 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        submatch = match_.narrowmatcher(subpath, match)
        sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
                    submatch)

# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def overridebailifchanged(orig, repo):
    orig(repo)
    repo.lfstatus = True
    s = repo.status()
    repo.lfstatus = False
    if s.modified or s.added or s.removed or s.deleted:
        raise util.Abort(_('uncommitted changes'))

# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
def overridefetch(orig, ui, repo, *pats, **opts):
    repo.lfstatus = True
    s = repo.status()
    repo.lfstatus = False
    if s.modified or s.added or s.removed or s.deleted:
        raise util.Abort(_('uncommitted changes'))
    return orig(ui, repo, *pats, **opts)

def overrideforget(orig, ui, repo, *pats, **opts):
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            result = 1

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(standins)
    finally:
        wlock.release()

    return result

def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pair of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()
    def dedup(fn, lfhash):
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        lfexists = basestore._openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]: # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)

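# Illustrative sketch, not part of the original extension: _getoutgoings()
# drives a caller-supplied callback, which makes it easy to collect the
# outgoing largefiles in whatever shape the caller needs.  The helper name
# below is an assumption for illustration; the real callers are
# outgoinghook() and summaryremotehook(), which follow.
def _examplecollectoutgoing(repo, other, missing):
    pairs = []
    def addfunc(fn, lfhash):
        # record each (largefile name, content hash) that must be uploaded
        pairs.append((fn, lfhash))
    _getoutgoings(repo, other, missing, addfunc)
    return pairs
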
1016 def outgoinghook(ui, repo, other, opts, missing):
1016 def outgoinghook(ui, repo, other, opts, missing):
1017 if opts.pop('large', None):
1017 if opts.pop('large', None):
1018 lfhashes = set()
1018 lfhashes = set()
1019 if ui.debugflag:
1019 if ui.debugflag:
1020 toupload = {}
1020 toupload = {}
1021 def addfunc(fn, lfhash):
1021 def addfunc(fn, lfhash):
1022 if fn not in toupload:
1022 if fn not in toupload:
1023 toupload[fn] = []
1023 toupload[fn] = []
1024 toupload[fn].append(lfhash)
1024 toupload[fn].append(lfhash)
1025 lfhashes.add(lfhash)
1025 lfhashes.add(lfhash)
1026 def showhashes(fn):
1026 def showhashes(fn):
1027 for lfhash in sorted(toupload[fn]):
1027 for lfhash in sorted(toupload[fn]):
1028 ui.debug(' %s\n' % (lfhash))
1028 ui.debug(' %s\n' % (lfhash))
1029 else:
1029 else:
1030 toupload = set()
1030 toupload = set()
1031 def addfunc(fn, lfhash):
1031 def addfunc(fn, lfhash):
1032 toupload.add(fn)
1032 toupload.add(fn)
1033 lfhashes.add(lfhash)
1033 lfhashes.add(lfhash)
1034 def showhashes(fn):
1034 def showhashes(fn):
1035 pass
1035 pass
1036 _getoutgoings(repo, other, missing, addfunc)
1036 _getoutgoings(repo, other, missing, addfunc)
1037
1037
1038 if not toupload:
1038 if not toupload:
1039 ui.status(_('largefiles: no files to upload\n'))
1039 ui.status(_('largefiles: no files to upload\n'))
1040 else:
1040 else:
1041 ui.status(_('largefiles to upload (%d entities):\n')
1041 ui.status(_('largefiles to upload (%d entities):\n')
1042 % (len(lfhashes)))
1042 % (len(lfhashes)))
1043 for file in sorted(toupload):
1043 for file in sorted(toupload):
1044 ui.status(lfutil.splitstandin(file) + '\n')
1044 ui.status(lfutil.splitstandin(file) + '\n')
1045 showhashes(file)
1045 showhashes(file)
1046 ui.status('\n')
1046 ui.status('\n')
1047
1047
1048 def summaryremotehook(ui, repo, opts, changes):
1048 def summaryremotehook(ui, repo, opts, changes):
1049 largeopt = opts.get('large', False)
1049 largeopt = opts.get('large', False)
1050 if changes is None:
1050 if changes is None:
1051 if largeopt:
1051 if largeopt:
1052 return (False, True) # only outgoing check is needed
1052 return (False, True) # only outgoing check is needed
1053 else:
1053 else:
1054 return (False, False)
1054 return (False, False)
1055 elif largeopt:
1055 elif largeopt:
1056 url, branch, peer, outgoing = changes[1]
1056 url, branch, peer, outgoing = changes[1]
1057 if peer is None:
1057 if peer is None:
1058 # i18n: column positioning for "hg summary"
1058 # i18n: column positioning for "hg summary"
1059 ui.status(_('largefiles: (no remote repo)\n'))
1059 ui.status(_('largefiles: (no remote repo)\n'))
1060 return
1060 return
1061
1061
1062 toupload = set()
1062 toupload = set()
1063 lfhashes = set()
1063 lfhashes = set()
1064 def addfunc(fn, lfhash):
1064 def addfunc(fn, lfhash):
1065 toupload.add(fn)
1065 toupload.add(fn)
1066 lfhashes.add(lfhash)
1066 lfhashes.add(lfhash)
1067 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1067 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1068
1068
1069 if not toupload:
1069 if not toupload:
1070 # i18n: column positioning for "hg summary"
1070 # i18n: column positioning for "hg summary"
1071 ui.status(_('largefiles: (no files to upload)\n'))
1071 ui.status(_('largefiles: (no files to upload)\n'))
1072 else:
1072 else:
1073 # i18n: column positioning for "hg summary"
1073 # i18n: column positioning for "hg summary"
1074 ui.status(_('largefiles: %d entities for %d files to upload\n')
1074 ui.status(_('largefiles: %d entities for %d files to upload\n')
1075 % (len(lfhashes), len(toupload)))
1075 % (len(lfhashes), len(toupload)))
1076
1076
1077 def overridesummary(orig, ui, repo, *pats, **opts):
1077 def overridesummary(orig, ui, repo, *pats, **opts):
1078 try:
1078 try:
1079 repo.lfstatus = True
1079 repo.lfstatus = True
1080 orig(ui, repo, *pats, **opts)
1080 orig(ui, repo, *pats, **opts)
1081 finally:
1081 finally:
1082 repo.lfstatus = False
1082 repo.lfstatus = False
1083
1083
1084 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1084 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1085 similarity=None):
1085 similarity=None):
1086 if not lfutil.islfilesrepo(repo):
1086 if not lfutil.islfilesrepo(repo):
1087 return orig(repo, pats, opts, dry_run, similarity)
1087 return orig(repo, pats, opts, dry_run, similarity)
1088 # Get the list of missing largefiles so we can remove them
1088 # Get the list of missing largefiles so we can remove them
1089 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1089 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1090 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1090 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1091 False, False, False)
1091 False, False, False)
1092
1092
1093 # Call into the normal remove code, but the removing of the standin, we want
1093 # Call into the normal remove code, but the removing of the standin, we want
1094 # to have handled by original addremove. Monkey patching here makes sure
1094 # to have handled by original addremove. Monkey patching here makes sure
1095 # we don't remove the standin in the largefiles code, preventing a very
1095 # we don't remove the standin in the largefiles code, preventing a very
1096 # confused state later.
1096 # confused state later.
1097 if s.deleted:
1097 if s.deleted:
1098 m = [repo.wjoin(f) for f in s.deleted]
1098 m = [repo.wjoin(f) for f in s.deleted]
1099 removelargefiles(repo.ui, repo, True, *m, **opts)
1099 removelargefiles(repo.ui, repo, True, *m, **opts)
1100 # Call into the normal add code, and any files that *should* be added as
1100 # Call into the normal add code, and any files that *should* be added as
1101 # largefiles will be
1101 # largefiles will be
1102 addlargefiles(repo.ui, repo, *pats, **opts)
1102 addlargefiles(repo.ui, repo, *pats, **opts)
1103 # Now that we've handled largefiles, hand off to the original addremove
1103 # Now that we've handled largefiles, hand off to the original addremove
1104 # function to take care of the rest. Make sure it doesn't do anything with
1104 # function to take care of the rest. Make sure it doesn't do anything with
1105 # largefiles by installing a matcher that will ignore them.
1105 # largefiles by installing a matcher that will ignore them.
1106 installnormalfilesmatchfn(repo[None].manifest())
1106 installnormalfilesmatchfn(repo[None].manifest())
1107 result = orig(repo, pats, opts, dry_run, similarity)
1107 result = orig(repo, pats, opts, dry_run, similarity)
1108 restorematchfn()
1108 restorematchfn()
1109 return result
1109 return result
1110
1110

# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX large file status is buggy when used on repo proxy.
    # XXX this needs to be investigated.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
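
# Illustrative sketch (not part of this file): the temporary monkey-patch
# pattern used by overridepurge() above, written as a generic helper.  A
# try/finally block guarantees that the original attribute is restored even
# if the wrapped call raises.  The helper name is hypothetical.
def _sketch_withpatchedstatus(repo, patchedstatus, func, *args, **kwargs):
    oldstatus = repo.status
    repo.status = patchedstatus       # install the filtered status
    try:
        return func(*args, **kwargs)  # e.g. the original purge command
    finally:
        repo.status = oldstatus       # always restore the original
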
def overriderollback(orig, ui, repo, **opts):
    wlock = repo.wlock()
    try:
        before = repo.dirstate.parents()
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return result

def overridetransplant(orig, ui, repo, *revs, **opts):
    try:
        oldstandins = lfutil.getstandinsstate(repo)
        repo._istransplanting = True
        result = orig(ui, repo, *revs, **opts)
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=True)
    finally:
        repo._istransplanting = False
    return result

def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
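
# Illustrative sketch (not part of this file): the matcher-wrapping trick used
# by overridecat() above, reduced to its essentials.  A matcher's matchfn and
# bad callbacks are replaced so that a standin is accepted whenever the
# corresponding largefile name was requested, without reporting that name as
# bad.  The helper name is hypothetical; splitstandin is the real lfutil
# helper mapping ".hglf/foo" to "foo" (or None for normal files).
import lfutil

def _sketch_wrapmatcher(m):
    origmatchfn, origbadfn = m.matchfn, m.bad
    notbad = set()
    def matchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)                # remember names mapped from standins
        return origmatchfn(lf)
    def badfn(f, msg):
        if f not in notbad:           # suppress "no such file" for those names
            origbadfn(f, msg)
    m.matchfn, m.bad = matchfn, badfn
    return m
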

def mercurialsinkbefore(orig, sink):
    sink.repo._isconverting = True
    orig(sink)

def mercurialsinkafter(orig, sink):
    sink.repo._isconverting = False
    orig(sink)

def mergeupdate(orig, repo, node, branchmerge, force, partial,
                *args, **kwargs):
    wlock = repo.wlock()
    try:
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g.: "rebase --collapse")

        linearmerge = not branchmerge and not force and not partial

        if linearmerge or (branchmerge and force and not partial):
            # update standins for linear-merge or force-branch-merge,
            # because largefiles in the working directory may be modified
            lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
            unsure, s = lfdirstate.status(match_.always(repo.root,
                                                        repo.getcwd()),
                                          [], False, False, False)
            for lfile in unsure + s.modified + s.added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))

        if linearmerge:
            # Only call updatelfiles on the standins that have changed
            # to save time
            oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)

        filelist = None
        if linearmerge:
            newstandins = lfutil.getstandinsstate(repo)
            filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

-        # suppress status message while automated committing
-        printmessage = not (getattr(repo, "_isrebasing", False) or
-                            getattr(repo, "_istransplanting", False))
+        printmessage = None
+        if (getattr(repo, "_isrebasing", False) or
+            getattr(repo, "_istransplanting", False)):
+            # suppress status message while automated committing
+            printmessage = False
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=printmessage,
                                normallookup=partial)

        return result
    finally:
        wlock.release()

def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    result = orig(repo, files, *args, **kwargs)

    filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
    if filelist:
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=False, normallookup=True)

    return result
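
# Illustrative sketch (not part of this file): function-level overrides such
# as mergeupdate() and scmutilmarktouched() are assumed to be hooked in with
# extensions.wrapfunction() from the extension's setup code, roughly like
# this; the helper name is hypothetical.
from mercurial import extensions, merge, scmutil

def _sketch_wrapfunctions():
    # run working-directory updates and addremove bookkeeping through the
    # largefiles-aware wrappers defined above
    extensions.wrapfunction(merge, 'update', mergeupdate)
    extensions.wrapfunction(scmutil, 'marktouched', scmutilmarktouched)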