largefiles: avoid redundant changectx looking up at each repetitions...
FUJIWARA Katsunori
r31654:1af4a164 default
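The summary above refers to hoisting the changectx lookup (`repo[node]` / `repo[None]`) out of per-file loops in `cachelfiles()` and `updatelfiles()`, so the context is resolved once instead of on every iteration. A minimal sketch of the pattern (the `standins` list and loop body are hypothetical, not taken from this commit):

    # before: repo[node] resolves the same changectx on every pass
    for f in standins:
        data = repo[node][f].data()

    # after: resolve the changectx once, reuse it inside the loop
    ctx = repo[node]
    for f in standins:
        data = ctx[f].data()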
@@ -1,577 +1,579
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''High-level command function for lfconvert, plus the cmdtable.'''
from __future__ import absolute_import

import errno
import hashlib
import os
import shutil

from mercurial.i18n import _

from mercurial import (
    cmdutil,
    commands,
    context,
    error,
    hg,
    lock,
    match as matchmod,
    node,
    scmutil,
    util,
)

from ..convert import (
    convcmd,
    filemap,
)

from . import (
    lfutil,
    storefactory
)

release = lock.release

# -- Commands ----------------------------------------------------------

cmdtable = {}
command = cmdutil.command(cmdtable)

@command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
     _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source. The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
                                                                 rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted to.
            # Don't need to lock src because we are just reading from its
            # history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = matchmod.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                            unit=_('revisions'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(ui, src, dest)
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)

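# Illustrative usage of the command above (shell invocations shown as
# comments; the repository names are hypothetical, not part of this file):
#
#   hg lfconvert --size 10 bigrepo bigrepo-largefiles
#       # files >= 10 MB, or files matching largefiles.patterns,
#       # are converted as largefiles
#   hg lfconvert --to-normal bigrepo-largefiles bigrepo-normal
#       # convert back; DEST can then be used without the extension
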
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                m = hashlib.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(), 'x' in fctx.flags(),
                                      renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)

def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()

# Generate list of changed files
def _getchangedfiles(ctx, parents):
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)
    return files

# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)
    return parents

# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        data = _converttags (repo.ui, revmap, data)
    return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
                              'x' in fctx.flags(), renamed)

# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(' ', 1)
        except ValueError:
            ui.warn(_('skipping incorrectly formatted tag %s\n')
                % line)
            continue
        try:
            newid = node.bin(id)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n')
                % id)
            continue
        try:
            newdata.append('%s %s\n' % (node.hex(revmap[newid]),
                name))
        except KeyError:
            ui.warn(_('no mapping for id %s\n') % id)
            continue
    return ''.join(newdata)

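# Worked example for _converttags() above (hashes shortened and hypothetical):
# given a .hgtags line "0123abcd... mytag" and revmap mapping that source
# node to 89efcdab..., the emitted line becomes "89efcdab... mytag";
# malformed lines and unmapped ids are warned about and skipped.
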
def _islfile(file, ctx, matcher, size):
    '''Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size.'''
    # never store special .hg* files as largefiles
    if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
        return False
    if matcher and matcher(file):
        return True
    try:
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        return False

def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    files = filter(lambda h: not retval[h], files)
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    for hash in files:
        ui.progress(_('uploading largefiles'), at, unit=_('files'),
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise error.Abort(_('largefile %s missing from store'
                                ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)

def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    if all:
        revs = repo.revs('all()')
    else:
        revs = ['.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)

def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)
    toget = []

+    ctx = repo[node]
    for lfile in lfiles:
        try:
-            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
+            expectedhash = ctx[lfutil.standin(lfile)].data().strip()
        except IOError as err:
            if err.errno == errno.ENOENT:
                continue # node must be None and standin wasn't found in wctx
            raise
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if toget:
        store = storefactory.openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])

def downloadlfiles(ui, repo, rev=None):
    matchfn = scmutil.match(repo[None],
                            [repo.wjoin(lfutil.shortname)], {})
    def prepare(ctx, fns):
        pass
    totalsuccess = 0
    totalmissing = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
                                          prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(success)
            totalmissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_("%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing

def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        updated, removed = 0, 0
        wvfs = repo.wvfs
+        wctx = repo[None]
        for lfile in lfiles:
            rellfile = lfile
            rellfileorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(rellfile)),
                start=repo.root)
            relstandin = lfutil.standin(lfile)
            relstandinorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(relstandin)),
                start=repo.root)
            if wvfs.exists(relstandin):
                if (wvfs.exists(relstandinorig) and
                    wvfs.exists(rellfile)):
                    shutil.copyfile(wvfs.join(rellfile),
                                    wvfs.join(rellfileorig))
                    wvfs.unlinkpath(relstandinorig)
                expecthash = lfutil.readstandin(repo, lfile)
                if expecthash != '':
-                    if lfile not in repo[None]: # not switched to normal file
+                    if lfile not in wctx: # not switched to normal file
                        wvfs.unlinkpath(rellfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (wvfs.exists(rellfile) and
-                    repo.dirstate.normalize(lfile) not in repo[None]):
+                    repo.dirstate.normalize(lfile) not in wctx):
                    wvfs.unlinkpath(rellfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the exec mode of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            rellfile = lfile
            relstandin = lfutil.standin(lfile)
            if wvfs.exists(relstandin):
                # exec is decided by the users permissions using mask 0o100
                standinexec = wvfs.stat(relstandin).st_mode & 0o100
                st = wvfs.stat(rellfile)
                mode = st.st_mode
                if standinexec != mode & 0o100:
                    # first remove all X bits, then shift all R bits to X
                    mode &= ~0o111
                    if standinexec:
                        mode |= (mode >> 2) & 0o111 & ~util.umask
                    wvfs.chmod(rellfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
                                                                     removed))

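# Worked example for the exec-bit sync in updatelfiles() above (umask 0o022
# assumed for illustration; util.umask is the real value): for a largefile
# with mode 0o644 whose standin is executable, standinexec is 0o100 while
# mode & 0o100 is 0, so the branch runs:
#   mode &= ~0o111                          -> 0o644 (no exec bits to clear)
#   mode |= (0o644 >> 2) & 0o111 & ~0o022   -> 0o644 | 0o111 = 0o755
# i.e. every readable bit gains the matching exec bit, minus what the
# umask forbids.
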
@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + commands.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    repo.lfpullsource = source

    revs = opts.get('rev', [])
    if not revs:
        raise error.Abort(_('no revisions specified'))
    revs = scmutil.revrange(repo, revs)

    numcached = 0
    for rev in revs:
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        (cached, missing) = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_("%d largefiles cached\n") % numcached)
@@ -1,1458 +1,1460
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Overridden Mercurial commands and functions for the largefiles extension'''
from __future__ import absolute_import

import copy
import os

from mercurial.i18n import _

from mercurial import (
    archival,
    cmdutil,
    error,
    hg,
    match as matchmod,
    pathutil,
    registrar,
    scmutil,
    smartset,
    util,
)

from . import (
    lfcommands,
    lfutil,
    storefactory,
)

# -- Utility functions: commonly/repeatedly needed functionality ---------------

def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)
    lfile = lambda f: lfutil.standin(f) in manifest
    m._files = filter(lfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m

def composenormalfilematcher(match, manifest, exclude=None):
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)
    notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                              manifest or f in excluded)
    m._files = filter(notlfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m

def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
            default='relpath', badfn=None):
        if opts is None:
            opts = {}
        match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)
    oldmatch = installmatchfn(overridematch)

def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    oldmatch = scmutil.match
    setattr(f, 'oldmatch', oldmatch)
    scmutil.match = f
    return oldmatch

def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    scmutil.match = getattr(scmutil.match, 'oldmatch')

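# Illustrative pairing for the two helpers above (not part of this file;
# 'overridematch' stands for any replacement match function):
#
#   oldmatch = installmatchfn(overridematch)   # scmutil.match is now patched
#   try:
#       ...  # code that should see the overriding matcher
#   finally:
#       restorematchfn()                       # undo exactly one install
#
# As the docstrings note, n installs require n restores.
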
def installmatchandpatsfn(f):
    oldmatchandpats = scmutil.matchandpats
    setattr(f, 'oldmatchandpats', oldmatchandpats)
    scmutil.matchandpats = f
    return oldmatchandpats

def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
                                   scmutil.matchandpats)

def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = matchmod.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in repo.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    return added, bad

def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if opts.get('dry_run'):
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()

    return result

# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    return lfutil.splitstandin(path) or path

# -- Wrappers: modify existing commands --------------------------------

def overrideadd(orig, ui, repo, *pats, **opts):
    if opts.get('normal') and opts.get('large'):
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)

def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    bad.extend(f for f in lbad)
    return bad

def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    return removelargefiles(ui, repo, False, matcher, after=after,
                            force=force) or result

def overridestatusfn(orig, repo, rev2, **opts):
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False

def overridestatus(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

def overridedirty(orig, repo, ignoreupdate=False):
    try:
        repo._repo.lfstatus = True
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False

def overridelog(orig, ui, repo, *pats, **opts):
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
            default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            if pat.startswith('set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
371 and repo.wvfs.isdir(standin):
372 m._files.append(standin)
372 m._files.append(standin)
373
373
374 m._fileroots = set(m._files)
374 m._fileroots = set(m._files)
375 m._always = False
375 m._always = False
376 origmatchfn = m.matchfn
376 origmatchfn = m.matchfn
377 def lfmatchfn(f):
377 def lfmatchfn(f):
378 lf = lfutil.splitstandin(f)
378 lf = lfutil.splitstandin(f)
379 if lf is not None and origmatchfn(lf):
379 if lf is not None and origmatchfn(lf):
380 return True
380 return True
381 r = origmatchfn(f)
381 r = origmatchfn(f)
382 return r
382 return r
383 m.matchfn = lfmatchfn
383 m.matchfn = lfmatchfn
384
384
385 ui.debug('updated patterns: %s\n' % sorted(pats))
385 ui.debug('updated patterns: %s\n' % sorted(pats))
386 return m, pats
386 return m, pats
387
387
388 # For hg log --patch, the match object is used in two different senses:
388 # For hg log --patch, the match object is used in two different senses:
389 # (1) to determine what revisions should be printed out, and
389 # (1) to determine what revisions should be printed out, and
390 # (2) to determine what files to print out diffs for.
390 # (2) to determine what files to print out diffs for.
391 # The magic matchandpats override should be used for case (1) but not for
391 # The magic matchandpats override should be used for case (1) but not for
392 # case (2).
392 # case (2).
393 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
393 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
394 wctx = repo[None]
394 wctx = repo[None]
395 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
395 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
396 return lambda rev: match
396 return lambda rev: match
397
397
398 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
398 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
399 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
399 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
400 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
400 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
401
401
402 try:
402 try:
403 return orig(ui, repo, *pats, **opts)
403 return orig(ui, repo, *pats, **opts)
404 finally:
404 finally:
405 restorematchandpatsfn()
405 restorematchandpatsfn()
406 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
406 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
407
407
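# Editor's note (toy model, not the actual match code): the pattern rewriting
# in overridematchandpats() above boils down to prepending the standin
# directory ('.hglf/') to plain patterns while leaving filesets and paths
# that are already standins untouched.  The kind names below are assumptions
# for illustration only:
SHORTNAME = '.hglf'

def tostandin(path):
    if path == SHORTNAME or path.startswith(SHORTNAME + '/'):
        return path                      # already points at a standin
    return SHORTNAME + '/' + path

def fixpat(pat):
    if pat.startswith('set:'):           # filesets are passed through
        return pat
    kind, sep, rest = pat.partition(':')
    if sep and kind in ('glob', 'path', 're', 'relpath'):
        return kind + ':' + tostandin(rest)
    return tostandin(pat)

if __name__ == '__main__':
    assert fixpat('foo/bar.bin') == '.hglf/foo/bar.bin'
    assert fixpat('glob:*.iso') == 'glob:.hglf/*.iso'
    assert fixpat('set:binary()') == 'set:binary()'
    assert fixpat('.hglf/foo') == '.hglf/foo'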
408 def overrideverify(orig, ui, repo, *pats, **opts):
408 def overrideverify(orig, ui, repo, *pats, **opts):
409 large = opts.pop('large', False)
409 large = opts.pop('large', False)
410 all = opts.pop('lfa', False)
410 all = opts.pop('lfa', False)
411 contents = opts.pop('lfc', False)
411 contents = opts.pop('lfc', False)
412
412
413 result = orig(ui, repo, *pats, **opts)
413 result = orig(ui, repo, *pats, **opts)
414 if large or all or contents:
414 if large or all or contents:
415 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
415 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
416 return result
416 return result
417
417
418 def overridedebugstate(orig, ui, repo, *pats, **opts):
418 def overridedebugstate(orig, ui, repo, *pats, **opts):
419 large = opts.pop('large', False)
419 large = opts.pop('large', False)
420 if large:
420 if large:
421 class fakerepo(object):
421 class fakerepo(object):
422 dirstate = lfutil.openlfdirstate(ui, repo)
422 dirstate = lfutil.openlfdirstate(ui, repo)
423 orig(ui, fakerepo, *pats, **opts)
423 orig(ui, fakerepo, *pats, **opts)
424 else:
424 else:
425 orig(ui, repo, *pats, **opts)
425 orig(ui, repo, *pats, **opts)
426
426
427 # Before starting the manifest merge, merge.updates will call
427 # Before starting the manifest merge, merge.updates will call
428 # _checkunknownfile to check if there are any files in the merged-in
428 # _checkunknownfile to check if there are any files in the merged-in
429 # changeset that collide with unknown files in the working copy.
429 # changeset that collide with unknown files in the working copy.
430 #
430 #
431 # The largefiles are seen as unknown, so this prevents us from merging
431 # The largefiles are seen as unknown, so this prevents us from merging
432 # in a file 'foo' if we already have a largefile with the same name.
432 # in a file 'foo' if we already have a largefile with the same name.
433 #
433 #
434 # The overridden function filters the unknown files by removing any
434 # The overridden function filters the unknown files by removing any
435 # largefiles. This makes the merge proceed and we can then handle this
435 # largefiles. This makes the merge proceed and we can then handle this
436 # case further in the overridden calculateupdates function below.
436 # case further in the overridden calculateupdates function below.
437 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
437 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
438 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
438 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
439 return False
439 return False
440 return origfn(repo, wctx, mctx, f, f2)
440 return origfn(repo, wctx, mctx, f, f2)
441
441
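# Editor's note (sketch): the collision check above reduces to a single
# membership test -- "is the standin for this path tracked in the working
# context?"  In isolation:
def is_known_largefile(path, tracked_standins, shortname='.hglf'):
    return (shortname + '/' + path) in tracked_standins

if __name__ == '__main__':
    assert is_known_largefile('big.bin', {'.hglf/big.bin'})
    assert not is_known_largefile('small.txt', {'.hglf/big.bin'})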
442 # The manifest merge handles conflicts on the manifest level. We want
442 # The manifest merge handles conflicts on the manifest level. We want
443 # to handle changes in largefile-ness of files at this level too.
443 # to handle changes in largefile-ness of files at this level too.
444 #
444 #
445 # The strategy is to run the original calculateupdates and then process
445 # The strategy is to run the original calculateupdates and then process
446 # the action list it outputs. There are two cases we need to deal with:
446 # the action list it outputs. There are two cases we need to deal with:
447 #
447 #
448 # 1. Normal file in p1, largefile in p2. Here the largefile is
448 # 1. Normal file in p1, largefile in p2. Here the largefile is
449 # detected via its standin file, which will enter the working copy
449 # detected via its standin file, which will enter the working copy
450 # with a "get" action. It is not "merge" since the standin is all
450 # with a "get" action. It is not "merge" since the standin is all
451 # Mercurial is concerned with at this level -- the link to the
451 # Mercurial is concerned with at this level -- the link to the
452 # existing normal file is not relevant here.
452 # existing normal file is not relevant here.
453 #
453 #
454 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
454 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
455 # since the largefile will be present in the working copy and
455 # since the largefile will be present in the working copy and
456 # different from the normal file in p2. Mercurial therefore
456 # different from the normal file in p2. Mercurial therefore
457 # triggers a merge action.
457 # triggers a merge action.
458 #
458 #
459 # In both cases, we prompt the user and emit new actions to either
459 # In both cases, we prompt the user and emit new actions to either
460 # remove the standin (if the normal file was kept) or to remove the
460 # remove the standin (if the normal file was kept) or to remove the
461 # normal file and get the standin (if the largefile was kept). The
461 # normal file and get the standin (if the largefile was kept). The
462 # default prompt answer is to use the largefile version since it was
462 # default prompt answer is to use the largefile version since it was
463 # presumably changed on purpose.
463 # presumably changed on purpose.
464 #
464 #
465 # Finally, the merge.applyupdates function will then take care of
465 # Finally, the merge.applyupdates function will then take care of
466 # writing the files into the working copy and lfcommands.updatelfiles
466 # writing the files into the working copy and lfcommands.updatelfiles
467 # will update the largefiles.
467 # will update the largefiles.
468 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
468 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
469 acceptremote, *args, **kwargs):
469 acceptremote, *args, **kwargs):
470 overwrite = force and not branchmerge
470 overwrite = force and not branchmerge
471 actions, diverge, renamedelete = origfn(
471 actions, diverge, renamedelete = origfn(
472 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
472 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
473
473
474 if overwrite:
474 if overwrite:
475 return actions, diverge, renamedelete
475 return actions, diverge, renamedelete
476
476
477 # Convert to dictionary with filename as key and action as value.
477 # Convert to dictionary with filename as key and action as value.
478 lfiles = set()
478 lfiles = set()
479 for f in actions:
479 for f in actions:
480 splitstandin = lfutil.splitstandin(f)
480 splitstandin = lfutil.splitstandin(f)
481 if splitstandin in p1:
481 if splitstandin in p1:
482 lfiles.add(splitstandin)
482 lfiles.add(splitstandin)
483 elif lfutil.standin(f) in p1:
483 elif lfutil.standin(f) in p1:
484 lfiles.add(f)
484 lfiles.add(f)
485
485
486 for lfile in sorted(lfiles):
486 for lfile in sorted(lfiles):
487 standin = lfutil.standin(lfile)
487 standin = lfutil.standin(lfile)
488 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
488 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
489 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
489 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
490 if sm in ('g', 'dc') and lm != 'r':
490 if sm in ('g', 'dc') and lm != 'r':
491 if sm == 'dc':
491 if sm == 'dc':
492 f1, f2, fa, move, anc = sargs
492 f1, f2, fa, move, anc = sargs
493 sargs = (p2[f2].flags(), False)
493 sargs = (p2[f2].flags(), False)
494 # Case 1: normal file in the working copy, largefile in
494 # Case 1: normal file in the working copy, largefile in
495 # the second parent
495 # the second parent
496 usermsg = _('remote turned local normal file %s into a largefile\n'
496 usermsg = _('remote turned local normal file %s into a largefile\n'
497 'use (l)argefile or keep (n)ormal file?'
497 'use (l)argefile or keep (n)ormal file?'
498 '$$ &Largefile $$ &Normal file') % lfile
498 '$$ &Largefile $$ &Normal file') % lfile
499 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
499 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
500 actions[lfile] = ('r', None, 'replaced by standin')
500 actions[lfile] = ('r', None, 'replaced by standin')
501 actions[standin] = ('g', sargs, 'replaces standin')
501 actions[standin] = ('g', sargs, 'replaces standin')
502 else: # keep local normal file
502 else: # keep local normal file
503 actions[lfile] = ('k', None, 'replaces standin')
503 actions[lfile] = ('k', None, 'replaces standin')
504 if branchmerge:
504 if branchmerge:
505 actions[standin] = ('k', None, 'replaced by non-standin')
505 actions[standin] = ('k', None, 'replaced by non-standin')
506 else:
506 else:
507 actions[standin] = ('r', None, 'replaced by non-standin')
507 actions[standin] = ('r', None, 'replaced by non-standin')
508 elif lm in ('g', 'dc') and sm != 'r':
508 elif lm in ('g', 'dc') and sm != 'r':
509 if lm == 'dc':
509 if lm == 'dc':
510 f1, f2, fa, move, anc = largs
510 f1, f2, fa, move, anc = largs
511 largs = (p2[f2].flags(), False)
511 largs = (p2[f2].flags(), False)
512 # Case 2: largefile in the working copy, normal file in
512 # Case 2: largefile in the working copy, normal file in
513 # the second parent
513 # the second parent
514 usermsg = _('remote turned local largefile %s into a normal file\n'
514 usermsg = _('remote turned local largefile %s into a normal file\n'
515 'keep (l)argefile or use (n)ormal file?'
515 'keep (l)argefile or use (n)ormal file?'
516 '$$ &Largefile $$ &Normal file') % lfile
516 '$$ &Largefile $$ &Normal file') % lfile
517 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
517 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
518 if branchmerge:
518 if branchmerge:
519 # largefile can be restored from standin safely
519 # largefile can be restored from standin safely
520 actions[lfile] = ('k', None, 'replaced by standin')
520 actions[lfile] = ('k', None, 'replaced by standin')
521 actions[standin] = ('k', None, 'replaces standin')
521 actions[standin] = ('k', None, 'replaces standin')
522 else:
522 else:
523 # "lfile" should be marked as "removed" without
523 # "lfile" should be marked as "removed" without
524 # removal of itself
524 # removal of itself
525 actions[lfile] = ('lfmr', None,
525 actions[lfile] = ('lfmr', None,
526 'forget non-standin largefile')
526 'forget non-standin largefile')
527
527
528 # linear-merge should treat this largefile as 're-added'
528 # linear-merge should treat this largefile as 're-added'
529 actions[standin] = ('a', None, 'keep standin')
529 actions[standin] = ('a', None, 'keep standin')
530 else: # pick remote normal file
530 else: # pick remote normal file
531 actions[lfile] = ('g', largs, 'replaces standin')
531 actions[lfile] = ('g', largs, 'replaces standin')
532 actions[standin] = ('r', None, 'replaced by non-standin')
532 actions[standin] = ('r', None, 'replaced by non-standin')
533
533
534 return actions, diverge, renamedelete
534 return actions, diverge, renamedelete
535
535
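# Editor's note (schematic only): for "case 1" above -- the remote turned a
# local normal file into a largefile -- the override rewrites two entries of
# the action dict, keyed by filename, using the same codes as the code above
# ('g' get, 'r' remove, 'k' keep).  The prompt and manifest lookups are
# replaced by plain parameters here:
def resolve_case1(actions, lfile, standin, pick_largefile, branchmerge):
    if pick_largefile:                     # use the remote largefile
        actions[lfile] = ('r', None, 'replaced by standin')
        actions[standin] = ('g', None, 'replaces standin')
    else:                                  # keep the local normal file
        actions[lfile] = ('k', None, 'replaces standin')
        actions[standin] = ('k' if branchmerge else 'r',
                            None, 'replaced by non-standin')
    return actions

if __name__ == '__main__':
    acts = {'big.bin': ('g', None, 'remote created'),
            '.hglf/big.bin': ('g', None, 'remote created')}
    resolve_case1(acts, 'big.bin', '.hglf/big.bin',
                  pick_largefile=True, branchmerge=False)
    assert acts['big.bin'][0] == 'r' and acts['.hglf/big.bin'][0] == 'g'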
536 def mergerecordupdates(orig, repo, actions, branchmerge):
536 def mergerecordupdates(orig, repo, actions, branchmerge):
537 if 'lfmr' in actions:
537 if 'lfmr' in actions:
538 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
538 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
539 for lfile, args, msg in actions['lfmr']:
539 for lfile, args, msg in actions['lfmr']:
540 # this should be executed before 'orig', to execute 'remove'
540 # this should be executed before 'orig', to execute 'remove'
541 # before all other actions
541 # before all other actions
542 repo.dirstate.remove(lfile)
542 repo.dirstate.remove(lfile)
543 # make sure lfile doesn't get synclfdirstate'd as normal
543 # make sure lfile doesn't get synclfdirstate'd as normal
544 lfdirstate.add(lfile)
544 lfdirstate.add(lfile)
545 lfdirstate.write()
545 lfdirstate.write()
546
546
547 return orig(repo, actions, branchmerge)
547 return orig(repo, actions, branchmerge)
548
548
549 # Override filemerge to prompt the user about how they wish to merge
549 # Override filemerge to prompt the user about how they wish to merge
550 # largefiles. This will handle identical edits without prompting the user.
550 # largefiles. This will handle identical edits without prompting the user.
551 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
551 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
552 labels=None):
552 labels=None):
553 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
553 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
554 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
554 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
555 labels=labels)
555 labels=labels)
556
556
557 ahash = fca.data().strip().lower()
557 ahash = fca.data().strip().lower()
558 dhash = fcd.data().strip().lower()
558 dhash = fcd.data().strip().lower()
559 ohash = fco.data().strip().lower()
559 ohash = fco.data().strip().lower()
560 if (ohash != ahash and
560 if (ohash != ahash and
561 ohash != dhash and
561 ohash != dhash and
562 (dhash == ahash or
562 (dhash == ahash or
563 repo.ui.promptchoice(
563 repo.ui.promptchoice(
564 _('largefile %s has a merge conflict\nancestor was %s\n'
564 _('largefile %s has a merge conflict\nancestor was %s\n'
565 'keep (l)ocal %s or\ntake (o)ther %s?'
565 'keep (l)ocal %s or\ntake (o)ther %s?'
566 '$$ &Local $$ &Other') %
566 '$$ &Local $$ &Other') %
567 (lfutil.splitstandin(orig), ahash, dhash, ohash),
567 (lfutil.splitstandin(orig), ahash, dhash, ohash),
568 0) == 1)):
568 0) == 1)):
569 repo.wwrite(fcd.path(), fco.data(), fco.flags())
569 repo.wwrite(fcd.path(), fco.data(), fco.flags())
570 return True, 0, False
570 return True, 0, False
571
571
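# Editor's note: because a standin contains only the largefile's hash, the
# three-way "file merge" above is a comparison of three short strings.  A
# sketch of that decision, with the interactive prompt replaced by a
# callback (an assumption for testability):
def pick_standin(ahash, dhash, ohash, prompt_other):
    """Return the hash that should end up in the working-copy standin."""
    if ohash == ahash or ohash == dhash:
        return dhash          # other side unchanged (or identical): keep local
    if dhash == ahash:
        return ohash          # only the other side changed: take it silently
    return ohash if prompt_other() else dhash   # genuine conflict: ask

if __name__ == '__main__':
    assert pick_standin('a1', 'b2', 'a1', lambda: True) == 'b2'
    assert pick_standin('a1', 'a1', 'c3', lambda: False) == 'c3'
    assert pick_standin('a1', 'b2', 'c3', lambda: True) == 'c3'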
572 def copiespathcopies(orig, ctx1, ctx2, match=None):
572 def copiespathcopies(orig, ctx1, ctx2, match=None):
573 copies = orig(ctx1, ctx2, match=match)
573 copies = orig(ctx1, ctx2, match=match)
574 updated = {}
574 updated = {}
575
575
576 for k, v in copies.iteritems():
576 for k, v in copies.iteritems():
577 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
577 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
578
578
579 return updated
579 return updated
580
580
581 # Copy first changes the matchers to match standins instead of
581 # Copy first changes the matchers to match standins instead of
582 # largefiles. Then it overrides util.copyfile; in that function it
582 # largefiles. Then it overrides util.copyfile; in that function it
583 # checks if the destination largefile already exists. It also keeps a
583 # checks if the destination largefile already exists. It also keeps a
584 # list of copied files so that the largefiles can be copied and the
584 # list of copied files so that the largefiles can be copied and the
585 # dirstate updated.
585 # dirstate updated.
586 def overridecopy(orig, ui, repo, pats, opts, rename=False):
586 def overridecopy(orig, ui, repo, pats, opts, rename=False):
587 # doesn't remove largefile on rename
587 # doesn't remove largefile on rename
588 if len(pats) < 2:
588 if len(pats) < 2:
589 # this isn't legal, let the original function deal with it
589 # this isn't legal, let the original function deal with it
590 return orig(ui, repo, pats, opts, rename)
590 return orig(ui, repo, pats, opts, rename)
591
591
592 # This could copy both lfiles and normal files in one command,
592 # This could copy both lfiles and normal files in one command,
593 # but we don't want to do that. First replace their matcher to
593 # but we don't want to do that. First replace their matcher to
594 # only match normal files and run it, then replace it to just
594 # only match normal files and run it, then replace it to just
595 # match largefiles and run it again.
595 # match largefiles and run it again.
596 nonormalfiles = False
596 nonormalfiles = False
597 nolfiles = False
597 nolfiles = False
598 installnormalfilesmatchfn(repo[None].manifest())
598 installnormalfilesmatchfn(repo[None].manifest())
599 try:
599 try:
600 result = orig(ui, repo, pats, opts, rename)
600 result = orig(ui, repo, pats, opts, rename)
601 except error.Abort as e:
601 except error.Abort as e:
602 if str(e) != _('no files to copy'):
602 if str(e) != _('no files to copy'):
603 raise e
603 raise e
604 else:
604 else:
605 nonormalfiles = True
605 nonormalfiles = True
606 result = 0
606 result = 0
607 finally:
607 finally:
608 restorematchfn()
608 restorematchfn()
609
609
610 # The first rename can cause our current working directory to be removed.
610 # The first rename can cause our current working directory to be removed.
611 # In that case there is nothing left to copy/rename so just quit.
611 # In that case there is nothing left to copy/rename so just quit.
612 try:
612 try:
613 repo.getcwd()
613 repo.getcwd()
614 except OSError:
614 except OSError:
615 return result
615 return result
616
616
617 def makestandin(relpath):
617 def makestandin(relpath):
618 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
618 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
619 return repo.wvfs.join(lfutil.standin(path))
619 return repo.wvfs.join(lfutil.standin(path))
620
620
621 fullpats = scmutil.expandpats(pats)
621 fullpats = scmutil.expandpats(pats)
622 dest = fullpats[-1]
622 dest = fullpats[-1]
623
623
624 if os.path.isdir(dest):
624 if os.path.isdir(dest):
625 if not os.path.isdir(makestandin(dest)):
625 if not os.path.isdir(makestandin(dest)):
626 os.makedirs(makestandin(dest))
626 os.makedirs(makestandin(dest))
627
627
628 try:
628 try:
629 # When we call orig below it creates the standins but we don't add
629 # When we call orig below it creates the standins but we don't add
630 # them to the dir state until later so lock during that time.
630 # them to the dir state until later so lock during that time.
631 wlock = repo.wlock()
631 wlock = repo.wlock()
632
632
633 manifest = repo[None].manifest()
633 manifest = repo[None].manifest()
634 def overridematch(ctx, pats=(), opts=None, globbed=False,
634 def overridematch(ctx, pats=(), opts=None, globbed=False,
635 default='relpath', badfn=None):
635 default='relpath', badfn=None):
636 if opts is None:
636 if opts is None:
637 opts = {}
637 opts = {}
638 newpats = []
638 newpats = []
639 # The patterns were previously mangled to add the standin
639 # The patterns were previously mangled to add the standin
640 # directory; we need to remove that now
640 # directory; we need to remove that now
641 for pat in pats:
641 for pat in pats:
642 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
642 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
643 newpats.append(pat.replace(lfutil.shortname, ''))
643 newpats.append(pat.replace(lfutil.shortname, ''))
644 else:
644 else:
645 newpats.append(pat)
645 newpats.append(pat)
646 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
646 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
647 m = copy.copy(match)
647 m = copy.copy(match)
648 lfile = lambda f: lfutil.standin(f) in manifest
648 lfile = lambda f: lfutil.standin(f) in manifest
649 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
649 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
650 m._fileroots = set(m._files)
650 m._fileroots = set(m._files)
651 origmatchfn = m.matchfn
651 origmatchfn = m.matchfn
652 def matchfn(f):
652 def matchfn(f):
653 lfile = lfutil.splitstandin(f)
653 lfile = lfutil.splitstandin(f)
654 return (lfile is not None and
654 return (lfile is not None and
655 (f in manifest) and
655 (f in manifest) and
656 origmatchfn(lfile) or
656 origmatchfn(lfile) or
657 None)
657 None)
658 m.matchfn = matchfn
658 m.matchfn = matchfn
659 return m
659 return m
660 oldmatch = installmatchfn(overridematch)
660 oldmatch = installmatchfn(overridematch)
661 listpats = []
661 listpats = []
662 for pat in pats:
662 for pat in pats:
663 if matchmod.patkind(pat) is not None:
663 if matchmod.patkind(pat) is not None:
664 listpats.append(pat)
664 listpats.append(pat)
665 else:
665 else:
666 listpats.append(makestandin(pat))
666 listpats.append(makestandin(pat))
667
667
668 try:
668 try:
669 origcopyfile = util.copyfile
669 origcopyfile = util.copyfile
670 copiedfiles = []
670 copiedfiles = []
671 def overridecopyfile(src, dest):
671 def overridecopyfile(src, dest):
672 if (lfutil.shortname in src and
672 if (lfutil.shortname in src and
673 dest.startswith(repo.wjoin(lfutil.shortname))):
673 dest.startswith(repo.wjoin(lfutil.shortname))):
674 destlfile = dest.replace(lfutil.shortname, '')
674 destlfile = dest.replace(lfutil.shortname, '')
675 if not opts['force'] and os.path.exists(destlfile):
675 if not opts['force'] and os.path.exists(destlfile):
676 raise IOError('',
676 raise IOError('',
677 _('destination largefile already exists'))
677 _('destination largefile already exists'))
678 copiedfiles.append((src, dest))
678 copiedfiles.append((src, dest))
679 origcopyfile(src, dest)
679 origcopyfile(src, dest)
680
680
681 util.copyfile = overridecopyfile
681 util.copyfile = overridecopyfile
682 result += orig(ui, repo, listpats, opts, rename)
682 result += orig(ui, repo, listpats, opts, rename)
683 finally:
683 finally:
684 util.copyfile = origcopyfile
684 util.copyfile = origcopyfile
685
685
686 lfdirstate = lfutil.openlfdirstate(ui, repo)
686 lfdirstate = lfutil.openlfdirstate(ui, repo)
687 for (src, dest) in copiedfiles:
687 for (src, dest) in copiedfiles:
688 if (lfutil.shortname in src and
688 if (lfutil.shortname in src and
689 dest.startswith(repo.wjoin(lfutil.shortname))):
689 dest.startswith(repo.wjoin(lfutil.shortname))):
690 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
690 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
691 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
691 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
692 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
692 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
693 if not os.path.isdir(destlfiledir):
693 if not os.path.isdir(destlfiledir):
694 os.makedirs(destlfiledir)
694 os.makedirs(destlfiledir)
695 if rename:
695 if rename:
696 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
696 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
697
697
698 # The file is gone, but this deletes any empty parent
698 # The file is gone, but this deletes any empty parent
699 # directories as a side-effect.
699 # directories as a side-effect.
700 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
700 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
701 lfdirstate.remove(srclfile)
701 lfdirstate.remove(srclfile)
702 else:
702 else:
703 util.copyfile(repo.wjoin(srclfile),
703 util.copyfile(repo.wjoin(srclfile),
704 repo.wjoin(destlfile))
704 repo.wjoin(destlfile))
705
705
706 lfdirstate.add(destlfile)
706 lfdirstate.add(destlfile)
707 lfdirstate.write()
707 lfdirstate.write()
708 except error.Abort as e:
708 except error.Abort as e:
709 if str(e) != _('no files to copy'):
709 if str(e) != _('no files to copy'):
710 raise e
710 raise e
711 else:
711 else:
712 nolfiles = True
712 nolfiles = True
713 finally:
713 finally:
714 restorematchfn()
714 restorematchfn()
715 wlock.release()
715 wlock.release()
716
716
717 if nolfiles and nonormalfiles:
717 if nolfiles and nonormalfiles:
718 raise error.Abort(_('no files to copy'))
718 raise error.Abort(_('no files to copy'))
719
719
720 return result
720 return result
721
721
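# Editor's note (reduced sketch): overridecopy() above temporarily replaces
# util.copyfile so every standin copy is recorded and an existing destination
# largefile is refused unless --force, then restores the original in a
# finally block.  The same patch/record/restore shape with the standard
# library standing in for Mercurial's util module:
import os
import shutil

def copy_with_log(pairs, force=False):
    recorded = []
    origcopy = shutil.copyfile
    def loggingcopy(src, dst):
        if not force and os.path.exists(dst):
            raise IOError('destination already exists: %s' % dst)
        recorded.append((src, dst))
        return origcopy(src, dst)
    shutil.copyfile = loggingcopy          # monkey-patch, like util.copyfile
    try:
        for src, dst in pairs:
            shutil.copyfile(src, dst)      # goes through loggingcopy
    finally:
        shutil.copyfile = origcopy         # always restore the real function
    return recorded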
722 # When the user calls revert, we have to be careful to not revert any
722 # When the user calls revert, we have to be careful to not revert any
723 # changes to other largefiles accidentally. This means we have to keep
723 # changes to other largefiles accidentally. This means we have to keep
724 # track of the largefiles that are being reverted so we only pull down
724 # track of the largefiles that are being reverted so we only pull down
725 # the necessary largefiles.
725 # the necessary largefiles.
726 #
726 #
727 # Standins are only updated (to match the hash of largefiles) before
727 # Standins are only updated (to match the hash of largefiles) before
728 # commits. Update the standins then run the original revert, changing
728 # commits. Update the standins then run the original revert, changing
729 # the matcher to hit standins instead of largefiles. Based on the
729 # the matcher to hit standins instead of largefiles. Based on the
730 # resulting standins update the largefiles.
730 # resulting standins update the largefiles.
731 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
731 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
732 # Because we put the standins in a bad state (by updating them)
732 # Because we put the standins in a bad state (by updating them)
733 # and then return them to a correct state we need to lock to
733 # and then return them to a correct state we need to lock to
734 # prevent others from changing them in their incorrect state.
734 # prevent others from changing them in their incorrect state.
735 with repo.wlock():
735 with repo.wlock():
736 lfdirstate = lfutil.openlfdirstate(ui, repo)
736 lfdirstate = lfutil.openlfdirstate(ui, repo)
737 s = lfutil.lfdirstatestatus(lfdirstate, repo)
737 s = lfutil.lfdirstatestatus(lfdirstate, repo)
738 lfdirstate.write()
738 lfdirstate.write()
739 for lfile in s.modified:
739 for lfile in s.modified:
740 lfutil.updatestandin(repo, lfutil.standin(lfile))
740 lfutil.updatestandin(repo, lfutil.standin(lfile))
741 for lfile in s.deleted:
741 for lfile in s.deleted:
742 fstandin = lfutil.standin(lfile)
742 fstandin = lfutil.standin(lfile)
743 if (repo.wvfs.exists(fstandin)):
743 if (repo.wvfs.exists(fstandin)):
744 repo.wvfs.unlink(fstandin)
744 repo.wvfs.unlink(fstandin)
745
745
746 oldstandins = lfutil.getstandinsstate(repo)
746 oldstandins = lfutil.getstandinsstate(repo)
747
747
748 def overridematch(mctx, pats=(), opts=None, globbed=False,
748 def overridematch(mctx, pats=(), opts=None, globbed=False,
749 default='relpath', badfn=None):
749 default='relpath', badfn=None):
750 if opts is None:
750 if opts is None:
751 opts = {}
751 opts = {}
752 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
752 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
753 m = copy.copy(match)
753 m = copy.copy(match)
754
754
755 # revert supports recursing into subrepos, and though largefiles
755 # revert supports recursing into subrepos, and though largefiles
756 # currently doesn't work correctly in that case, this match is
756 # currently doesn't work correctly in that case, this match is
757 # called, so the lfdirstate above may not be the correct one for
757 # called, so the lfdirstate above may not be the correct one for
758 # this invocation of match.
758 # this invocation of match.
759 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
759 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
760 False)
760 False)
761
761
762 wctx = repo[None]
762 def tostandin(f):
763 def tostandin(f):
763 standin = lfutil.standin(f)
764 standin = lfutil.standin(f)
764 if standin in ctx or standin in mctx:
765 if standin in ctx or standin in mctx:
765 return standin
766 return standin
766 elif standin in repo[None] or lfdirstate[f] == 'r':
767 elif standin in wctx or lfdirstate[f] == 'r':
767 return None
768 return None
768 return f
769 return f
769 m._files = [tostandin(f) for f in m._files]
770 m._files = [tostandin(f) for f in m._files]
770 m._files = [f for f in m._files if f is not None]
771 m._files = [f for f in m._files if f is not None]
771 m._fileroots = set(m._files)
772 m._fileroots = set(m._files)
772 origmatchfn = m.matchfn
773 origmatchfn = m.matchfn
773 def matchfn(f):
774 def matchfn(f):
774 lfile = lfutil.splitstandin(f)
775 lfile = lfutil.splitstandin(f)
775 if lfile is not None:
776 if lfile is not None:
776 return (origmatchfn(lfile) and
777 return (origmatchfn(lfile) and
777 (f in ctx or f in mctx))
778 (f in ctx or f in mctx))
778 return origmatchfn(f)
779 return origmatchfn(f)
779 m.matchfn = matchfn
780 m.matchfn = matchfn
780 return m
781 return m
781 oldmatch = installmatchfn(overridematch)
782 oldmatch = installmatchfn(overridematch)
782 try:
783 try:
783 orig(ui, repo, ctx, parents, *pats, **opts)
784 orig(ui, repo, ctx, parents, *pats, **opts)
784 finally:
785 finally:
785 restorematchfn()
786 restorematchfn()
786
787
787 newstandins = lfutil.getstandinsstate(repo)
788 newstandins = lfutil.getstandinsstate(repo)
788 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
789 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
789 # lfdirstate should be 'normallookup'-ed for updated files,
790 # lfdirstate should be 'normallookup'-ed for updated files,
790 # because reverting doesn't touch dirstate for 'normal' files
791 # because reverting doesn't touch dirstate for 'normal' files
791 # when target revision is explicitly specified: in such case,
792 # when target revision is explicitly specified: in such case,
792 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
793 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
793 # of target (standin) file.
794 # of target (standin) file.
794 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
795 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
795 normallookup=True)
796 normallookup=True)
796
797
797 # after pulling changesets, we need to take some extra care to get
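# Editor's note: the oldstandins/newstandins bookkeeping above amounts to
# snapshotting (standin, hash) state before and after the operation and
# updating only the largefiles whose entry changed.  As a standalone diff of
# two dicts (an approximation of getstandinsstate/getlfilestoupdate):
def changed_largefiles(old, new):
    # old/new map standin path -> content hash (missing means absent)
    return sorted(f for f in set(old) | set(new) if old.get(f) != new.get(f))

if __name__ == '__main__':
    before = {'.hglf/a.bin': '111', '.hglf/b.bin': '222'}
    after = {'.hglf/a.bin': '111', '.hglf/b.bin': '333', '.hglf/c.bin': '444'}
    assert changed_largefiles(before, after) == ['.hglf/b.bin', '.hglf/c.bin']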
798 # after pulling changesets, we need to take some extra care to get
798 # largefiles updated remotely
799 # largefiles updated remotely
799 def overridepull(orig, ui, repo, source=None, **opts):
800 def overridepull(orig, ui, repo, source=None, **opts):
800 revsprepull = len(repo)
801 revsprepull = len(repo)
801 if not source:
802 if not source:
802 source = 'default'
803 source = 'default'
803 repo.lfpullsource = source
804 repo.lfpullsource = source
804 result = orig(ui, repo, source, **opts)
805 result = orig(ui, repo, source, **opts)
805 revspostpull = len(repo)
806 revspostpull = len(repo)
806 lfrevs = opts.get('lfrev', [])
807 lfrevs = opts.get('lfrev', [])
807 if opts.get('all_largefiles'):
808 if opts.get('all_largefiles'):
808 lfrevs.append('pulled()')
809 lfrevs.append('pulled()')
809 if lfrevs and revspostpull > revsprepull:
810 if lfrevs and revspostpull > revsprepull:
810 numcached = 0
811 numcached = 0
811 repo.firstpulled = revsprepull # for pulled() revset expression
812 repo.firstpulled = revsprepull # for pulled() revset expression
812 try:
813 try:
813 for rev in scmutil.revrange(repo, lfrevs):
814 for rev in scmutil.revrange(repo, lfrevs):
814 ui.note(_('pulling largefiles for revision %s\n') % rev)
815 ui.note(_('pulling largefiles for revision %s\n') % rev)
815 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
816 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
816 numcached += len(cached)
817 numcached += len(cached)
817 finally:
818 finally:
818 del repo.firstpulled
819 del repo.firstpulled
819 ui.status(_("%d largefiles cached\n") % numcached)
820 ui.status(_("%d largefiles cached\n") % numcached)
820 return result
821 return result
821
822
822 def overridepush(orig, ui, repo, *args, **kwargs):
823 def overridepush(orig, ui, repo, *args, **kwargs):
823 """Override push command and store --lfrev parameters in opargs"""
824 """Override push command and store --lfrev parameters in opargs"""
824 lfrevs = kwargs.pop('lfrev', None)
825 lfrevs = kwargs.pop('lfrev', None)
825 if lfrevs:
826 if lfrevs:
826 opargs = kwargs.setdefault('opargs', {})
827 opargs = kwargs.setdefault('opargs', {})
827 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
828 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
828 return orig(ui, repo, *args, **kwargs)
829 return orig(ui, repo, *args, **kwargs)
829
830
830 def exchangepushoperation(orig, *args, **kwargs):
831 def exchangepushoperation(orig, *args, **kwargs):
831 """Override pushoperation constructor and store lfrevs parameter"""
832 """Override pushoperation constructor and store lfrevs parameter"""
832 lfrevs = kwargs.pop('lfrevs', None)
833 lfrevs = kwargs.pop('lfrevs', None)
833 pushop = orig(*args, **kwargs)
834 pushop = orig(*args, **kwargs)
834 pushop.lfrevs = lfrevs
835 pushop.lfrevs = lfrevs
835 return pushop
836 return pushop
836
837
837 revsetpredicate = registrar.revsetpredicate()
838 revsetpredicate = registrar.revsetpredicate()
838
839
839 @revsetpredicate('pulled()')
840 @revsetpredicate('pulled()')
840 def pulledrevsetsymbol(repo, subset, x):
841 def pulledrevsetsymbol(repo, subset, x):
841 """Changesets that just has been pulled.
842 """Changesets that just has been pulled.
842
843
843 Only available with largefiles from pull --lfrev expressions.
844 Only available with largefiles from pull --lfrev expressions.
844
845
845 .. container:: verbose
846 .. container:: verbose
846
847
847 Some examples:
848 Some examples:
848
849
849 - pull largefiles for all new changesets::
850 - pull largefiles for all new changesets::
850
851
851 hg pull --lfrev "pulled()"
852 hg pull --lfrev "pulled()"
852
853
853 - pull largefiles for all new branch heads::
854 - pull largefiles for all new branch heads::
854
855
855 hg pull --lfrev "head(pulled()) and not closed()"
856 hg pull --lfrev "head(pulled()) and not closed()"
856
857
857 """
858 """
858
859
859 try:
860 try:
860 firstpulled = repo.firstpulled
861 firstpulled = repo.firstpulled
861 except AttributeError:
862 except AttributeError:
862 raise error.Abort(_("pulled() only available in --lfrev"))
863 raise error.Abort(_("pulled() only available in --lfrev"))
863 return smartset.baseset([r for r in subset if r >= firstpulled])
864 return smartset.baseset([r for r in subset if r >= firstpulled])
864
865
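# Editor's note: pulled() works because revision numbers are assigned
# sequentially, so everything at or above the pre-pull length of the repo
# (repo.firstpulled, recorded in overridepull above) arrived in this pull.
# Over a plain sequence of revision numbers:
def pulled(subset, firstpulled):
    return [r for r in subset if r >= firstpulled]

assert pulled(range(0, 8), 5) == [5, 6, 7]   # three newly pulled revisions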
865 def overrideclone(orig, ui, source, dest=None, **opts):
866 def overrideclone(orig, ui, source, dest=None, **opts):
866 d = dest
867 d = dest
867 if d is None:
868 if d is None:
868 d = hg.defaultdest(source)
869 d = hg.defaultdest(source)
869 if opts.get('all_largefiles') and not hg.islocal(d):
870 if opts.get('all_largefiles') and not hg.islocal(d):
870 raise error.Abort(_(
871 raise error.Abort(_(
871 '--all-largefiles is incompatible with non-local destination %s') %
872 '--all-largefiles is incompatible with non-local destination %s') %
872 d)
873 d)
873
874
874 return orig(ui, source, dest, **opts)
875 return orig(ui, source, dest, **opts)
875
876
876 def hgclone(orig, ui, opts, *args, **kwargs):
877 def hgclone(orig, ui, opts, *args, **kwargs):
877 result = orig(ui, opts, *args, **kwargs)
878 result = orig(ui, opts, *args, **kwargs)
878
879
879 if result is not None:
880 if result is not None:
880 sourcerepo, destrepo = result
881 sourcerepo, destrepo = result
881 repo = destrepo.local()
882 repo = destrepo.local()
882
883
883 # When cloning to a remote repo (like through SSH), no repo is available
884 # When cloning to a remote repo (like through SSH), no repo is available
884 # from the peer. Therefore the largefiles can't be downloaded and the
885 # from the peer. Therefore the largefiles can't be downloaded and the
885 # hgrc can't be updated.
886 # hgrc can't be updated.
886 if not repo:
887 if not repo:
887 return result
888 return result
888
889
889 # If largefiles is required for this repo, permanently enable it locally
890 # If largefiles is required for this repo, permanently enable it locally
890 if 'largefiles' in repo.requirements:
891 if 'largefiles' in repo.requirements:
891 with repo.vfs('hgrc', 'a', text=True) as fp:
892 with repo.vfs('hgrc', 'a', text=True) as fp:
892 fp.write('\n[extensions]\nlargefiles=\n')
893 fp.write('\n[extensions]\nlargefiles=\n')
893
894
894 # Caching is implicitly limited to 'rev' option, since the dest repo was
895 # Caching is implicitly limited to 'rev' option, since the dest repo was
895 # truncated at that point. The user may expect a download count with
896 # truncated at that point. The user may expect a download count with
896 # this option, so attempt whether or not this is a largefile repo.
897 # this option, so attempt whether or not this is a largefile repo.
897 if opts.get('all_largefiles'):
898 if opts.get('all_largefiles'):
898 success, missing = lfcommands.downloadlfiles(ui, repo, None)
899 success, missing = lfcommands.downloadlfiles(ui, repo, None)
899
900
900 if missing != 0:
901 if missing != 0:
901 return None
902 return None
902
903
903 return result
904 return result
904
905
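# Editor's note: the "permanently enable it locally" step in hgclone() above
# simply appends this stanza to the new clone's .hg/hgrc (the literal text
# produced by fp.write('\n[extensions]\nlargefiles=\n')):
#
#     [extensions]
#     largefiles=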
905 def overriderebase(orig, ui, repo, **opts):
906 def overriderebase(orig, ui, repo, **opts):
906 if not util.safehasattr(repo, '_largefilesenabled'):
907 if not util.safehasattr(repo, '_largefilesenabled'):
907 return orig(ui, repo, **opts)
908 return orig(ui, repo, **opts)
908
909
909 resuming = opts.get('continue')
910 resuming = opts.get('continue')
910 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
911 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
911 repo._lfstatuswriters.append(lambda *msg, **opts: None)
912 repo._lfstatuswriters.append(lambda *msg, **opts: None)
912 try:
913 try:
913 return orig(ui, repo, **opts)
914 return orig(ui, repo, **opts)
914 finally:
915 finally:
915 repo._lfstatuswriters.pop()
916 repo._lfstatuswriters.pop()
916 repo._lfcommithooks.pop()
917 repo._lfcommithooks.pop()
917
918
918 def overridearchivecmd(orig, ui, repo, dest, **opts):
919 def overridearchivecmd(orig, ui, repo, dest, **opts):
919 repo.unfiltered().lfstatus = True
920 repo.unfiltered().lfstatus = True
920
921
921 try:
922 try:
922 return orig(ui, repo.unfiltered(), dest, **opts)
923 return orig(ui, repo.unfiltered(), dest, **opts)
923 finally:
924 finally:
924 repo.unfiltered().lfstatus = False
925 repo.unfiltered().lfstatus = False
925
926
926 def hgwebarchive(orig, web, req, tmpl):
927 def hgwebarchive(orig, web, req, tmpl):
927 web.repo.lfstatus = True
928 web.repo.lfstatus = True
928
929
929 try:
930 try:
930 return orig(web, req, tmpl)
931 return orig(web, req, tmpl)
931 finally:
932 finally:
932 web.repo.lfstatus = False
933 web.repo.lfstatus = False
933
934
934 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
935 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
935 prefix='', mtime=None, subrepos=None):
936 prefix='', mtime=None, subrepos=None):
936 # For some reason setting repo.lfstatus in hgwebarchive only changes the
937 # For some reason setting repo.lfstatus in hgwebarchive only changes the
937 # unfiltered repo's attr, so check that as well.
938 # unfiltered repo's attr, so check that as well.
938 if not repo.lfstatus and not repo.unfiltered().lfstatus:
939 if not repo.lfstatus and not repo.unfiltered().lfstatus:
939 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
940 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
940 subrepos)
941 subrepos)
941
942
942 # No need to lock because we are only reading history and
943 # No need to lock because we are only reading history and
943 # largefile caches, neither of which are modified.
944 # largefile caches, neither of which are modified.
944 if node is not None:
945 if node is not None:
945 lfcommands.cachelfiles(repo.ui, repo, node)
946 lfcommands.cachelfiles(repo.ui, repo, node)
946
947
947 if kind not in archival.archivers:
948 if kind not in archival.archivers:
948 raise error.Abort(_("unknown archive type '%s'") % kind)
949 raise error.Abort(_("unknown archive type '%s'") % kind)
949
950
950 ctx = repo[node]
951 ctx = repo[node]
951
952
952 if kind == 'files':
953 if kind == 'files':
953 if prefix:
954 if prefix:
954 raise error.Abort(
955 raise error.Abort(
955 _('cannot give prefix when archiving to files'))
956 _('cannot give prefix when archiving to files'))
956 else:
957 else:
957 prefix = archival.tidyprefix(dest, kind, prefix)
958 prefix = archival.tidyprefix(dest, kind, prefix)
958
959
959 def write(name, mode, islink, getdata):
960 def write(name, mode, islink, getdata):
960 if matchfn and not matchfn(name):
961 if matchfn and not matchfn(name):
961 return
962 return
962 data = getdata()
963 data = getdata()
963 if decode:
964 if decode:
964 data = repo.wwritedata(name, data)
965 data = repo.wwritedata(name, data)
965 archiver.addfile(prefix + name, mode, islink, data)
966 archiver.addfile(prefix + name, mode, islink, data)
966
967
967 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
968 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
968
969
969 if repo.ui.configbool("ui", "archivemeta", True):
970 if repo.ui.configbool("ui", "archivemeta", True):
970 write('.hg_archival.txt', 0o644, False,
971 write('.hg_archival.txt', 0o644, False,
971 lambda: archival.buildmetadata(ctx))
972 lambda: archival.buildmetadata(ctx))
972
973
973 for f in ctx:
974 for f in ctx:
974 ff = ctx.flags(f)
975 ff = ctx.flags(f)
975 getdata = ctx[f].data
976 getdata = ctx[f].data
976 lfile = lfutil.splitstandin(f)
977 lfile = lfutil.splitstandin(f)
977 if lfile is not None:
978 if lfile is not None:
978 if node is not None:
979 if node is not None:
979 path = lfutil.findfile(repo, getdata().strip())
980 path = lfutil.findfile(repo, getdata().strip())
980
981
981 if path is None:
982 if path is None:
982 raise error.Abort(
983 raise error.Abort(
983 _('largefile %s not found in repo store or system cache')
984 _('largefile %s not found in repo store or system cache')
984 % lfile)
985 % lfile)
985 else:
986 else:
986 path = lfile
987 path = lfile
987
988
988 f = lfile
989 f = lfile
989
990
990 getdata = lambda: util.readfile(path)
991 getdata = lambda: util.readfile(path)
991 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
992 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
992
993
993 if subrepos:
994 if subrepos:
994 for subpath in sorted(ctx.substate):
995 for subpath in sorted(ctx.substate):
995 sub = ctx.workingsub(subpath)
996 sub = ctx.workingsub(subpath)
996 submatch = matchmod.subdirmatcher(subpath, matchfn)
997 submatch = matchmod.subdirmatcher(subpath, matchfn)
997 sub._repo.lfstatus = True
998 sub._repo.lfstatus = True
998 sub.archive(archiver, prefix, submatch)
999 sub.archive(archiver, prefix, submatch)
999
1000
1000 archiver.done()
1001 archiver.done()
1001
1002
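# Editor's note (toy model): the core of the archive override above is that
# a standin '.hglf/<f>' is emitted into the archive as '<f>', and its content
# is read from the repo store / system cache under the hash recorded in the
# standin.  With plain dicts instead of a changectx and lfutil.findfile():
SHORT = '.hglf/'

def archive_entries(manifest, cache):
    # manifest: path -> file content; a standin's content is the hash line
    for path, data in sorted(manifest.items()):
        if path.startswith(SHORT):
            lfhash = data.strip()
            yield path[len(SHORT):], cache[lfhash]   # real largefile bytes
        else:
            yield path, data

if __name__ == '__main__':
    manifest = {'README': 'hi\n', '.hglf/big.iso': 'deadbeef\n'}
    cache = {'deadbeef': b'<binary blob>'}
    assert dict(archive_entries(manifest, cache)) == {
        'README': 'hi\n', 'big.iso': b'<binary blob>'}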
1002 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1003 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1003 if not repo._repo.lfstatus:
1004 if not repo._repo.lfstatus:
1004 return orig(repo, archiver, prefix, match, decode)
1005 return orig(repo, archiver, prefix, match, decode)
1005
1006
1006 repo._get(repo._state + ('hg',))
1007 repo._get(repo._state + ('hg',))
1007 rev = repo._state[1]
1008 rev = repo._state[1]
1008 ctx = repo._repo[rev]
1009 ctx = repo._repo[rev]
1009
1010
1010 if ctx.node() is not None:
1011 if ctx.node() is not None:
1011 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1012 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1012
1013
1013 def write(name, mode, islink, getdata):
1014 def write(name, mode, islink, getdata):
1014 # At this point, the standin has been replaced with the largefile name,
1015 # At this point, the standin has been replaced with the largefile name,
1015 # so the normal matcher works here without the lfutil variants.
1016 # so the normal matcher works here without the lfutil variants.
1016 if match and not match(f):
1017 if match and not match(f):
1017 return
1018 return
1018 data = getdata()
1019 data = getdata()
1019 if decode:
1020 if decode:
1020 data = repo._repo.wwritedata(name, data)
1021 data = repo._repo.wwritedata(name, data)
1021
1022
1022 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1023 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1023
1024
1024 for f in ctx:
1025 for f in ctx:
1025 ff = ctx.flags(f)
1026 ff = ctx.flags(f)
1026 getdata = ctx[f].data
1027 getdata = ctx[f].data
1027 lfile = lfutil.splitstandin(f)
1028 lfile = lfutil.splitstandin(f)
1028 if lfile is not None:
1029 if lfile is not None:
1029 if ctx.node() is not None:
1030 if ctx.node() is not None:
1030 path = lfutil.findfile(repo._repo, getdata().strip())
1031 path = lfutil.findfile(repo._repo, getdata().strip())
1031
1032
1032 if path is None:
1033 if path is None:
1033 raise error.Abort(
1034 raise error.Abort(
1034 _('largefile %s not found in repo store or system cache')
1035 _('largefile %s not found in repo store or system cache')
1035 % lfile)
1036 % lfile)
1036 else:
1037 else:
1037 path = lfile
1038 path = lfile
1038
1039
1039 f = lfile
1040 f = lfile
1040
1041
1041 getdata = lambda: util.readfile(os.path.join(prefix, path))
1042 getdata = lambda: util.readfile(os.path.join(prefix, path))
1042
1043
1043 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1044 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1044
1045
1045 for subpath in sorted(ctx.substate):
1046 for subpath in sorted(ctx.substate):
1046 sub = ctx.workingsub(subpath)
1047 sub = ctx.workingsub(subpath)
1047 submatch = matchmod.subdirmatcher(subpath, match)
1048 submatch = matchmod.subdirmatcher(subpath, match)
1048 sub._repo.lfstatus = True
1049 sub._repo.lfstatus = True
1049 sub.archive(archiver, prefix + repo._path + '/', submatch, decode)
1050 sub.archive(archiver, prefix + repo._path + '/', submatch, decode)
1050
1051
1051 # If a largefile is modified, the change is not reflected in its
1052 # If a largefile is modified, the change is not reflected in its
1052 # standin until a commit. cmdutil.bailifchanged() raises an exception
1053 # standin until a commit. cmdutil.bailifchanged() raises an exception
1053 # if the repo has uncommitted changes. Wrap it to also check if
1054 # if the repo has uncommitted changes. Wrap it to also check if
1054 # largefiles were changed. This is used by bisect, backout and fetch.
1055 # largefiles were changed. This is used by bisect, backout and fetch.
1055 def overridebailifchanged(orig, repo, *args, **kwargs):
1056 def overridebailifchanged(orig, repo, *args, **kwargs):
1056 orig(repo, *args, **kwargs)
1057 orig(repo, *args, **kwargs)
1057 repo.lfstatus = True
1058 repo.lfstatus = True
1058 s = repo.status()
1059 s = repo.status()
1059 repo.lfstatus = False
1060 repo.lfstatus = False
1060 if s.modified or s.added or s.removed or s.deleted:
1061 if s.modified or s.added or s.removed or s.deleted:
1061 raise error.Abort(_('uncommitted changes'))
1062 raise error.Abort(_('uncommitted changes'))
1062
1063
1063 def postcommitstatus(orig, repo, *args, **kwargs):
1064 def postcommitstatus(orig, repo, *args, **kwargs):
1064 repo.lfstatus = True
1065 repo.lfstatus = True
1065 try:
1066 try:
1066 return orig(repo, *args, **kwargs)
1067 return orig(repo, *args, **kwargs)
1067 finally:
1068 finally:
1068 repo.lfstatus = False
1069 repo.lfstatus = False
1069
1070
1070 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1071 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1071 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1072 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1072 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1073 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1073 m = composelargefilematcher(match, repo[None].manifest())
1074 m = composelargefilematcher(match, repo[None].manifest())
1074
1075
1075 try:
1076 try:
1076 repo.lfstatus = True
1077 repo.lfstatus = True
1077 s = repo.status(match=m, clean=True)
1078 s = repo.status(match=m, clean=True)
1078 finally:
1079 finally:
1079 repo.lfstatus = False
1080 repo.lfstatus = False
1081 manifest = repo[None].manifest()
1080 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1082 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1081 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1083 forget = [f for f in forget if lfutil.standin(f) in manifest]
1082
1084
1083 for f in forget:
1085 for f in forget:
1084 fstandin = lfutil.standin(f)
1086 fstandin = lfutil.standin(f)
1085 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1087 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1086 ui.warn(_('not removing %s: file is already untracked\n')
1088 ui.warn(_('not removing %s: file is already untracked\n')
1087 % m.rel(f))
1089 % m.rel(f))
1088 bad.append(f)
1090 bad.append(f)
1089
1091
1090 for f in forget:
1092 for f in forget:
1091 if ui.verbose or not m.exact(f):
1093 if ui.verbose or not m.exact(f):
1092 ui.status(_('removing %s\n') % m.rel(f))
1094 ui.status(_('removing %s\n') % m.rel(f))
1093
1095
1094 # Need to lock because standin files are deleted then removed from the
1096 # Need to lock because standin files are deleted then removed from the
1095 # repository and we could race in-between.
1097 # repository and we could race in-between.
1096 with repo.wlock():
1098 with repo.wlock():
1097 lfdirstate = lfutil.openlfdirstate(ui, repo)
1099 lfdirstate = lfutil.openlfdirstate(ui, repo)
1098 for f in forget:
1100 for f in forget:
1099 if lfdirstate[f] == 'a':
1101 if lfdirstate[f] == 'a':
1100 lfdirstate.drop(f)
1102 lfdirstate.drop(f)
1101 else:
1103 else:
1102 lfdirstate.remove(f)
1104 lfdirstate.remove(f)
1103 lfdirstate.write()
1105 lfdirstate.write()
1104 standins = [lfutil.standin(f) for f in forget]
1106 standins = [lfutil.standin(f) for f in forget]
1105 for f in standins:
1107 for f in standins:
1106 repo.wvfs.unlinkpath(f, ignoremissing=True)
1108 repo.wvfs.unlinkpath(f, ignoremissing=True)
1107 rejected = repo[None].forget(standins)
1109 rejected = repo[None].forget(standins)
1108
1110
1109 bad.extend(f for f in rejected if f in m.files())
1111 bad.extend(f for f in rejected if f in m.files())
1110 forgot.extend(f for f in forget if f not in rejected)
1112 forgot.extend(f for f in forget if f not in rejected)
1111 return bad, forgot
1113 return bad, forgot
1112
1114
1113 def _getoutgoings(repo, other, missing, addfunc):
1115 def _getoutgoings(repo, other, missing, addfunc):
1114 """get pairs of filename and largefile hash in outgoing revisions
1116 """get pairs of filename and largefile hash in outgoing revisions
1115 in 'missing'.
1117 in 'missing'.
1116
1118
1117 largefiles already existing on 'other' repository are ignored.
1119 largefiles already existing on 'other' repository are ignored.
1118
1120
1119 'addfunc' is invoked with each unique pair of filename and
1121 'addfunc' is invoked with each unique pair of filename and
1120 largefile hash value.
1122 largefile hash value.
1121 """
1123 """
1122 knowns = set()
1124 knowns = set()
1123 lfhashes = set()
1125 lfhashes = set()
1124 def dedup(fn, lfhash):
1126 def dedup(fn, lfhash):
1125 k = (fn, lfhash)
1127 k = (fn, lfhash)
1126 if k not in knowns:
1128 if k not in knowns:
1127 knowns.add(k)
1129 knowns.add(k)
1128 lfhashes.add(lfhash)
1130 lfhashes.add(lfhash)
1129 lfutil.getlfilestoupload(repo, missing, dedup)
1131 lfutil.getlfilestoupload(repo, missing, dedup)
1130 if lfhashes:
1132 if lfhashes:
1131 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1133 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1132 for fn, lfhash in knowns:
1134 for fn, lfhash in knowns:
1133 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1135 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1134 addfunc(fn, lfhash)
1136 addfunc(fn, lfhash)
1135
1137
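
# Illustrative sketch (not part of overrides.py): the dedup-then-batch pattern
# that _getoutgoings() uses above, with a plain set standing in for the remote
# store.  Every name prefixed with _sketch_ is hypothetical.
def _sketch_getoutgoings(candidates, remote_hashes, report):
    """candidates: iterable of (filename, hash) pairs, possibly repeated."""
    knowns = set()
    hashes = set()
    for fn, h in candidates:
        if (fn, h) not in knowns:       # drop duplicate (file, hash) pairs
            knowns.add((fn, h))
            hashes.add(h)
    # one batched existence query instead of one round trip per file
    exists = {h: (h in remote_hashes) for h in hashes}
    for fn, h in knowns:
        if not exists[h]:               # hash is missing on the remote side
            report(fn, h)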
def outgoinghook(ui, repo, other, opts, missing):
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')

def summaryremotehook(ui, repo, opts, changes):
    largeopt = opts.get('large', False)
    if changes is None:
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))

def overridesummary(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
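
# Illustrative sketch (not part of overrides.py): the repo.lfstatus toggle that
# overridesummary() and several other wrappers perform with try/finally,
# expressed as a reusable context manager.  _sketch_lfstatus is a hypothetical
# helper, not an API provided by this version of the extension.
import contextlib

@contextlib.contextmanager
def _sketch_lfstatus(repo, value=True):
    # make repo.status() report largefiles while the block runs, then restore
    old = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = old
# usage: with _sketch_lfstatus(repo): orig(ui, repo, *pats, **opts)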
def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but leave the removal of the standin
    # files to the original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
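
# Illustrative sketch (not part of overrides.py): narrowing a matcher by
# wrapping matchfn, as scmutiladdremove() does for deleted files above.
# Copying the matcher and wrapping matchfn leaves m.files()/m.exact() alone,
# so message formatting is unchanged.  _sketch_narrowmatcher is hypothetical.
def _sketch_narrowmatcher(matcher, allowed):
    import copy
    m = copy.copy(matcher)
    origmatchfn = m.matchfn
    # accept only files that both the original matcher and the allow-list take
    m.matchfn = lambda f: f in allowed and origmatchfn(f)
    return m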
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only look up attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a workaround we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus

def overriderollback(orig, ui, repo, **opts):
    with repo.wlock():
        before = repo.dirstate.parents()
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result
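
# Note on the single-letter states consulted by the overrides above: Mercurial
# dirstate entries (and the parallel largefiles dirstate) use 'n' for normal,
# 'a' for added, 'r' for removed, 'm' for merged from the second parent and
# '?' for untracked files.  This is the convention behind patterns such as
# dropping entries still in state 'a' while calling remove() for the rest.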
def overridetransplant(orig, ui, repo, *revs, **opts):
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result

def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get('decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                hash = lfutil.readstandin(repo, lf, ctx)
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _('largefile %s is not in cache and could not be '
                              'downloaded') % lf)
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, "rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
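
# Illustrative sketch (not part of overrides.py): the standin path mapping that
# lfutil.standin()/lfutil.splitstandin() implement, assuming the '.hglf/'
# prefix the extension uses (lfutil.shortname).  _sketch_* names are
# hypothetical.
def _sketch_standin(path):
    return '.hglf/' + path              # largefile path -> standin path

def _sketch_splitstandin(path):
    if path.startswith('.hglf/'):
        return path[len('.hglf/'):]     # standin path -> largefile path
    return None                         # not a standin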
def mergeupdate(orig, repo, node, branchmerge, force,
                *args, **kwargs):
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g.: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(matchmod.always(repo.root,
                                                      repo.getcwd()),
                                      [], False, True, False)
        oldclean = set(s.clean)
        pctx = repo['.']
        dctx = repo[node]
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, pctx)):
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.normallookup(lfile)
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles dirty (and thus rehashing them),
        # mark all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.normal(lfile)
        lfdirstate.write()

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

        return result

def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    result = orig(repo, files, *args, **kwargs)

    filelist = []
    for f in files:
        lf = lfutil.splitstandin(f)
        if lf is not None:
            filelist.append(lf)
    if filelist:
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=False, normallookup=True)

    return result
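
# Illustrative sketch (not part of overrides.py): how wrappers like the ones in
# this module are typically installed.  The real call sites live in the
# extension's uisetup module; _sketch_uisetup below only demonstrates the
# extensions.wrapfunction / wrapcommand pattern and is never called here.
def _sketch_uisetup(ui):
    from mercurial import commands, extensions
    from mercurial import merge as mergemod
    # route merge.update and scmutil.marktouched through the wrappers above
    extensions.wrapfunction(mergemod, 'update', mergeupdate)
    extensions.wrapfunction(scmutil, 'marktouched', scmutilmarktouched)
    # wrap "hg cat" so standins are resolved to largefile contents
    extensions.wrapcommand(commands.table, 'cat', overridecat)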