##// END OF EJS Templates
largefiles: avoid redundant standin() invocations...
FUJIWARA Katsunori -
r31618:8228bc8f default
parent child Browse files
Show More
@@ -1,576 +1,577 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import shutil
15 import shutil
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 from mercurial import (
19 from mercurial import (
20 cmdutil,
20 cmdutil,
21 commands,
21 commands,
22 context,
22 context,
23 error,
23 error,
24 hg,
24 hg,
25 lock,
25 lock,
26 match as matchmod,
26 match as matchmod,
27 node,
27 node,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 )
30 )
31
31
32 from ..convert import (
32 from ..convert import (
33 convcmd,
33 convcmd,
34 filemap,
34 filemap,
35 )
35 )
36
36
37 from . import (
37 from . import (
38 lfutil,
38 lfutil,
39 storefactory
39 storefactory
40 )
40 )
41
41
42 release = lock.release
42 release = lock.release
43
43
44 # -- Commands ----------------------------------------------------------
44 # -- Commands ----------------------------------------------------------
45
45
46 cmdtable = {}
46 cmdtable = {}
47 command = cmdutil.command(cmdtable)
47 command = cmdutil.command(cmdtable)
48
48
@command('lfconvert',
    [('s', 'size', '',
    _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
    _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    tolfile = not opts['to_normal']
    if tolfile:
        # size threshold only matters when converting *to* largefiles
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Walk every changeset of the source via changelog.nodesbetween().
        # A generator keeps memory usage down compared to a full list.
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted
            # to. The source needs no lock: we only read immutable history.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = matchmod.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revisions'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # drop the working-copy largefile directory and any stray
            # working-copy largefiles left behind by the conversion
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash:
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If anything was converted to a largefile, record the
            # 'largefiles' requirement in the destination repository.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            # temporarily swap in our largefile-aware converter
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(ui, src, dest)
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
191
191
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    """Convert one source changeset ``ctx`` into the destination repo.

    Classifies each changed file as largefile or normal (updating the
    ``lfiles``/``normalfiles`` caches shared across changesets), writes
    standins for modified largefiles, and commits the result via
    _commitcontext(), recording the new node in ``revmap``.
    """
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    # a symlink can never be a largefile
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            # compute the standin name once and reuse it below
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                # (single sha1() call replaces the old empty-init + update())
                hash = hashlib.sha1(ctx[f].data()).hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # Callback for memctx: supply file content for each dstfile.
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(), 'x' in fctx.flags(),
                                      renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
269
270
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    # Build an in-memory changeset that mirrors the source changeset's
    # metadata, commit it to the destination, and record the node mapping.
    newctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                            getfilectx, ctx.user(), ctx.date(), ctx.extra())
    committed = rdst.commitctx(newctx)
    # make sure every converted largefile ends up in the local store
    lfutil.copyalltostore(rdst, committed)
    rdst.setparents(committed)
    revmap[ctx.node()] = rdst.changelog.tip()
277
278
# Generate list of changed files
def _getchangedfiles(ctx, parents):
    '''Return the set of files touched by ctx, including files that only
    differ from one of the parents of a merge.'''
    changed = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        # files present in a parent but absent from the merge result
        changed |= (set(mp1) | set(mp2)) - set(mc)
        # files whose content differs from at least one parent
        changed.update(f for f in mc
                       if mc[f] != mp1.get(f, None) or
                          mc[f] != mp2.get(f, None))
    return changed
290
291
# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    '''Map ctx's parent nodes through revmap, padded to two with nullid.'''
    parents = [revmap[p.node()] for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(node.nullid)
    return parents
299
300
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    '''Return a memfilectx for normal file f in ctx, or None if f was
    removed or renamed away in this changeset.'''
    try:
        srcfctx = ctx.filectx(f)
    except error.LookupError:
        return None
    copysource = srcfctx.renamed()
    if copysource:
        copysource = copysource[0]

    filedata = srcfctx.data()
    if f == '.hgtags':
        # .hgtags embeds changeset ids; remap them to the converted repo
        filedata = _converttags(repo.ui, revmap, filedata)
    return context.memfilectx(repo, f, filedata, 'l' in srcfctx.flags(),
                              'x' in srcfctx.flags(), copysource)
315
316
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    '''Rewrite .hgtags content so node ids point at converted changesets.

    Malformed lines and ids with no mapping are skipped with a warning.'''
    newlines = []
    for line in data.splitlines():
        try:
            id, name = line.split(' ', 1)
        except ValueError:
            ui.warn(_('skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            newid = node.bin(id)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            newlines.append('%s %s\n' % (node.hex(revmap[newid]), name))
        except KeyError:
            ui.warn(_('no mapping for id %s\n') % id)
            continue
    return ''.join(newlines)
339
340
def _islfile(file, ctx, matcher, size):
    '''Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size.'''
    # never store special .hg* files as largefiles
    if file in ('.hgtags', '.hgignore', '.hgsigs'):
        return False
    if matcher and matcher(file):
        return True
    try:
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        # file not present in this revision: cannot be a largefile
        return False
352
353
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store

    ``files`` is a list of largefile hashes; entries the remote store
    already has are skipped.'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    # use a list comprehension, not filter(): on Python 3 filter() returns
    # a lazy iterator, which would break len(files) and the loop below
    files = [h for h in files if not retval[h]]
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    for hash in files:
        ui.progress(_('uploading largefiles'), at, unit=_('files'),
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise error.Abort(_('largefile %s missing from store'
                                ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)
378
379
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store.  With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID).  With --all, check every changeset in
    this repository.'''
    revs = repo.revs('all()') if all else ['.']
    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
392
393
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)

    toget = []
    for lfile in lfiles:
        try:
            # the standin's content is the expected hash of the largefile
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        except IOError as err:
            if err.errno == errno.ENOENT:
                continue # node must be None and standin wasn't found in wctx
            raise
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if not toget:
        # everything already cached
        return ([], [])
    store = storefactory.openstore(repo)
    return store.get(toget)
421
422
def downloadlfiles(ui, repo, rev=None):
    '''Fetch into the local cache every largefile referenced by the given
    revisions and report (cached_count, missing_count).'''
    m = scmutil.match(repo[None],
                      [repo.wjoin(lfutil.shortname)], {})
    def prepare(ctx, fns):
        pass
    cached = 0
    failed = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        for ctx in cmdutil.walkchangerevs(repo, m, {'rev' : rev},
                                          prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            cached += len(success)
            failed += len(missing)
    ui.status(_("%d additional largefiles cached\n") % cached)
    if failed > 0:
        ui.status(_("%d largefiles failed to download\n") % failed)
    return cached, failed
439
440
440 def updatelfiles(ui, repo, filelist=None, printmessage=None,
441 def updatelfiles(ui, repo, filelist=None, printmessage=None,
441 normallookup=False):
442 normallookup=False):
442 '''Update largefiles according to standins in the working directory
443 '''Update largefiles according to standins in the working directory
443
444
444 If ``printmessage`` is other than ``None``, it means "print (or
445 If ``printmessage`` is other than ``None``, it means "print (or
445 ignore, for false) message forcibly".
446 ignore, for false) message forcibly".
446 '''
447 '''
447 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
448 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
448 with repo.wlock():
449 with repo.wlock():
449 lfdirstate = lfutil.openlfdirstate(ui, repo)
450 lfdirstate = lfutil.openlfdirstate(ui, repo)
450 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
451 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
451
452
452 if filelist is not None:
453 if filelist is not None:
453 filelist = set(filelist)
454 filelist = set(filelist)
454 lfiles = [f for f in lfiles if f in filelist]
455 lfiles = [f for f in lfiles if f in filelist]
455
456
456 update = {}
457 update = {}
457 updated, removed = 0, 0
458 updated, removed = 0, 0
458 wvfs = repo.wvfs
459 wvfs = repo.wvfs
459 for lfile in lfiles:
460 for lfile in lfiles:
460 rellfile = lfile
461 rellfile = lfile
461 rellfileorig = os.path.relpath(
462 rellfileorig = os.path.relpath(
462 scmutil.origpath(ui, repo, wvfs.join(rellfile)),
463 scmutil.origpath(ui, repo, wvfs.join(rellfile)),
463 start=repo.root)
464 start=repo.root)
464 relstandin = lfutil.standin(lfile)
465 relstandin = lfutil.standin(lfile)
465 relstandinorig = os.path.relpath(
466 relstandinorig = os.path.relpath(
466 scmutil.origpath(ui, repo, wvfs.join(relstandin)),
467 scmutil.origpath(ui, repo, wvfs.join(relstandin)),
467 start=repo.root)
468 start=repo.root)
468 if wvfs.exists(relstandin):
469 if wvfs.exists(relstandin):
469 if (wvfs.exists(relstandinorig) and
470 if (wvfs.exists(relstandinorig) and
470 wvfs.exists(rellfile)):
471 wvfs.exists(rellfile)):
471 shutil.copyfile(wvfs.join(rellfile),
472 shutil.copyfile(wvfs.join(rellfile),
472 wvfs.join(rellfileorig))
473 wvfs.join(rellfileorig))
473 wvfs.unlinkpath(relstandinorig)
474 wvfs.unlinkpath(relstandinorig)
474 expecthash = lfutil.readstandin(repo, lfile)
475 expecthash = lfutil.readstandin(repo, lfile)
475 if expecthash != '':
476 if expecthash != '':
476 if lfile not in repo[None]: # not switched to normal file
477 if lfile not in repo[None]: # not switched to normal file
477 wvfs.unlinkpath(rellfile, ignoremissing=True)
478 wvfs.unlinkpath(rellfile, ignoremissing=True)
478 # use normallookup() to allocate an entry in largefiles
479 # use normallookup() to allocate an entry in largefiles
479 # dirstate to prevent lfilesrepo.status() from reporting
480 # dirstate to prevent lfilesrepo.status() from reporting
480 # missing files as removed.
481 # missing files as removed.
481 lfdirstate.normallookup(lfile)
482 lfdirstate.normallookup(lfile)
482 update[lfile] = expecthash
483 update[lfile] = expecthash
483 else:
484 else:
484 # Remove lfiles for which the standin is deleted, unless the
485 # Remove lfiles for which the standin is deleted, unless the
485 # lfile is added to the repository again. This happens when a
486 # lfile is added to the repository again. This happens when a
486 # largefile is converted back to a normal file: the standin
487 # largefile is converted back to a normal file: the standin
487 # disappears, but a new (normal) file appears as the lfile.
488 # disappears, but a new (normal) file appears as the lfile.
488 if (wvfs.exists(rellfile) and
489 if (wvfs.exists(rellfile) and
489 repo.dirstate.normalize(lfile) not in repo[None]):
490 repo.dirstate.normalize(lfile) not in repo[None]):
490 wvfs.unlinkpath(rellfile)
491 wvfs.unlinkpath(rellfile)
491 removed += 1
492 removed += 1
492
493
493 # largefile processing might be slow and be interrupted - be prepared
494 # largefile processing might be slow and be interrupted - be prepared
494 lfdirstate.write()
495 lfdirstate.write()
495
496
496 if lfiles:
497 if lfiles:
497 statuswriter(_('getting changed largefiles\n'))
498 statuswriter(_('getting changed largefiles\n'))
498 cachelfiles(ui, repo, None, lfiles)
499 cachelfiles(ui, repo, None, lfiles)
499
500
500 for lfile in lfiles:
501 for lfile in lfiles:
501 update1 = 0
502 update1 = 0
502
503
503 expecthash = update.get(lfile)
504 expecthash = update.get(lfile)
504 if expecthash:
505 if expecthash:
505 if not lfutil.copyfromcache(repo, expecthash, lfile):
506 if not lfutil.copyfromcache(repo, expecthash, lfile):
506 # failed ... but already removed and set to normallookup
507 # failed ... but already removed and set to normallookup
507 continue
508 continue
508 # Synchronize largefile dirstate to the last modified
509 # Synchronize largefile dirstate to the last modified
509 # time of the file
510 # time of the file
510 lfdirstate.normal(lfile)
511 lfdirstate.normal(lfile)
511 update1 = 1
512 update1 = 1
512
513
513 # copy the exec mode of largefile standin from the repository's
514 # copy the exec mode of largefile standin from the repository's
514 # dirstate to its state in the lfdirstate.
515 # dirstate to its state in the lfdirstate.
515 rellfile = lfile
516 rellfile = lfile
516 relstandin = lfutil.standin(lfile)
517 relstandin = lfutil.standin(lfile)
517 if wvfs.exists(relstandin):
518 if wvfs.exists(relstandin):
518 # exec is decided by the users permissions using mask 0o100
519 # exec is decided by the users permissions using mask 0o100
519 standinexec = wvfs.stat(relstandin).st_mode & 0o100
520 standinexec = wvfs.stat(relstandin).st_mode & 0o100
520 st = wvfs.stat(rellfile)
521 st = wvfs.stat(rellfile)
521 mode = st.st_mode
522 mode = st.st_mode
522 if standinexec != mode & 0o100:
523 if standinexec != mode & 0o100:
523 # first remove all X bits, then shift all R bits to X
524 # first remove all X bits, then shift all R bits to X
524 mode &= ~0o111
525 mode &= ~0o111
525 if standinexec:
526 if standinexec:
526 mode |= (mode >> 2) & 0o111 & ~util.umask
527 mode |= (mode >> 2) & 0o111 & ~util.umask
527 wvfs.chmod(rellfile, mode)
528 wvfs.chmod(rellfile, mode)
528 update1 = 1
529 update1 = 1
529
530
530 updated += update1
531 updated += update1
531
532
532 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
533 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
533
534
534 lfdirstate.write()
535 lfdirstate.write()
535 if lfiles:
536 if lfiles:
536 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
537 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
537 removed))
538 removed))
538
539
@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + commands.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # Remember where to pull from so the store machinery uses it.
    repo.lfpullsource = source

    revs = opts.get('rev', [])
    if not revs:
        raise error.Abort(_('no revisions specified'))
    revs = scmutil.revrange(repo, revs)

    totalcached = 0
    for rev in revs:
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        (cached, missing) = cachelfiles(ui, repo, rev)
        totalcached += len(cached)
    ui.status(_("%d largefiles cached\n") % totalcached)
@@ -1,670 +1,670 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import platform
15 import platform
16 import stat
16 import stat
17
17
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 from mercurial import (
20 from mercurial import (
21 dirstate,
21 dirstate,
22 encoding,
22 encoding,
23 error,
23 error,
24 httpconnection,
24 httpconnection,
25 match as matchmod,
25 match as matchmod,
26 node,
26 node,
27 pycompat,
27 pycompat,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 vfs as vfsmod,
30 vfs as vfsmod,
31 )
31 )
32
32
# Directory (relative to the repo root) that holds the standin files,
# and the same name with a trailing slash for prefix tests.
shortname = '.hglf'
shortnameslash = shortname + '/'
# Name of the per-repo largefiles store directory and the config section.
longname = 'largefiles'
36
36
37 # -- Private worker functions ------------------------------------------
37 # -- Private worker functions ------------------------------------------
38
38
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum largefile size (in MB) as a float.

    ``opt`` wins when set; otherwise, when ``assumelfiles`` is true, the
    ``largefiles.minsize`` config value (or ``default``) is consulted.
    Raises Abort when the value is not numeric or no size is available.
    """
    size = opt
    if assumelfiles and not size:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % size)
    if size is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return size
52
52
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlink failed (cross-device, unsupported FS, ...): fall back
        # to an atomic chunked copy that preserves the source's mode bits
        with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
65
65
def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.'''
    cachedir = _usercachedir(ui)
    return os.path.join(cachedir, hash)
72
72
def _usercachedir(ui):
    '''Locate the "global" largefiles cache directory for this user.

    An explicit ``largefiles.usercache`` config path wins; otherwise a
    platform-appropriate cache location is derived from the environment.
    Raises Abort when no location can be determined.
    '''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return path
    if pycompat.osname == 'nt':
        # prefer the local (non-roaming) profile on Windows
        appdata = encoding.environ.get('LOCALAPPDATA',\
                encoding.environ.get('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif pycompat.osname == 'posix':
        # honor XDG_CACHE_HOME, then fall back to ~/.cache
        xdg = encoding.environ.get('XDG_CACHE_HOME')
        if xdg:
            return os.path.join(xdg, longname)
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n')
                          % pycompat.osname)
    raise error.Abort(_('unknown %s usercache location') % longname)
98
98
def inusercache(ui, hash):
    '''Report whether the largefile with the given hash is in the user cache.'''
    return os.path.exists(usercachepath(ui, hash))
102
102
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        # populate the store from the user cache so later lookups hit it
        storedpath = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), storedpath)
        return storedpath
    return None
117
117
class largefilesdirstate(dirstate.dirstate):
    """A dirstate subclass tracking largefiles by their unix-style paths.

    Every path argument is normalized through unixpath() before being
    handed to the base dirstate, and ignore handling is disabled.
    """
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
140
140
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if standins:
            vfs.makedirs(lfstoredir)

        for standinf in standins:
            lfdirstate.normallookup(splitstandin(standinf))
    return lfdirstate
166
166
def lfdirstatestatus(lfdirstate, repo):
    '''Resolve 'unsure' entries of the largefiles dirstate against the
    working parent's standins and return the resulting status object.'''
    wctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        # clean only when the standin's recorded hash matches the file
        if fctx and fctx.data().strip() == hashfile(repo.wjoin(lfile)):
            clean.append(lfile)
            lfdirstate.normal(lfile)
        else:
            modified.append(lfile)
    return s
183
183
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
195
195
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store.
    With forcelocal, only the local store is consulted (not a share source).'''
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
199
199
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.'''
    # shared repos keep their primary store in the share source
    if forcelocal or not repo.shared():
        return repo.vfs.join(longname, hash)
    return repo.vfs.reljoin(repo.sharedpath, longname, hash)
206
206
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    primary = storepath(repo, hash, False)

    if instore(repo, hash):
        return (primary, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)

    return (primary, False)
223
223
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        # cached data is corrupt: warn, drop the partial file, and fail
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True
247
247
def copytostore(repo, revorctx, file, uploaded=False):
    '''Copy largefile ``file`` into the store, keyed by the hash recorded
    in its standin at ``revorctx``. No-op when already stored.'''
    wvfs = repo.wvfs
    hash = readstandin(repo, file, revorctx)
    if instore(repo, hash):
        return
    if not wvfs.exists(file):
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
        return
    copytostoreabsolute(repo, wvfs.join(file), hash)
258
258
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is None or filename not in ctx.manifest():
            # not a standin, or the standin was removed in this revision
            continue
        copytostore(repo, ctx, realfile)
267
267
def copytostoreabsolute(repo, file, hash):
    '''Copy the file at absolute path ``file`` into the store under ``hash``,
    hardlinking from the user cache when possible.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        dst = storepath(repo, hash)
        util.makedirs(os.path.dirname(dst))
        # atomic copy so an interrupted write never leaves a partial store file
        with open(file, 'rb') as srcf:
            with util.atomictempfile(dst,
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
279
279
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
285
285
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    def badfn(f, msg):
        pass

    if rmatcher and not rmatcher.always():
        # rebase the caller's patterns under the standin directory
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
305
305
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    standinmatch = getstandinmatcher(repo, rmatcher)
    origmatchfn = standinmatch.matchfn
    def composedmatchfn(f):
        # f must be a standin AND its largefile must match the caller's matcher
        return origmatchfn(f) and rmatcher.matchfn(splitstandin(f))
    standinmatch.matchfn = composedmatchfn

    return standinmatch
317
317
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
329
329
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)
334
334
def splitstandin(filename):
    '''Return the largefile path for a standin path, or None when the
    path is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None
344
344
def updatestandin(repo, standin):
    '''Refresh the standin's recorded hash/mode from the working-copy
    largefile; abort when the largefile is missing.'''
    lfile = splitstandin(standin)
    file = repo.wjoin(lfile)
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_('%s: file not found!') % lfile)
    hash = hashfile(file)
    executable = getexecutable(file)
    writestandin(repo, standin, hash, executable)
354
354
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
359
359
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
363
363
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    # b'' (not '') so this also works on Python 3, where hashlib
    # rejects text strings; on Python 2 the two are identical.
    hasher = hashlib.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()
372
372
def hashfile(file):
    '''Return the hex SHA-1 hash of the named file, or '' if it does not
    exist.'''
    if not os.path.exists(file):
        return ''
    # b'' (not '') so this also works on Python 3, where hashlib
    # rejects text strings; on Python 2 the two are identical.
    hasher = hashlib.sha1(b'')
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd):
            hasher.update(data)
    return hasher.hexdigest()
381
381
def getexecutable(filename):
    '''Report whether the named file is executable (x bit set for user,
    group AND other).'''
    st_mode = os.stat(filename).st_mode
    return ((st_mode & stat.S_IXUSR) and
            (st_mode & stat.S_IXGRP) and
            (st_mode & stat.S_IXOTH))
387
387
def urljoin(first, second, *arg):
    '''Join two or more URL path segments, ensuring exactly one '/'
    between adjacent segments (one trailing/leading slash pair is
    collapsed; extra slashes inside a segment are preserved).'''
    def _glue(left, right):
        # Add a separator only when the left side lacks one, and drop a
        # single leading slash from the right side.
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = first
    for piece in (second,) + arg:
        url = _glue(url, piece)
    return url
400
400
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    digest = hashlib.sha1()
    # filechunkiter yields the file contents in bounded-size chunks, so
    # arbitrarily large files are hashed without loading them whole.
    for block in util.filechunkiter(data):
        digest.update(block)
    return digest.hexdigest()
408
408
def httpsendfile(ui, filename):
    '''Return an httpsendfile object opening ``filename`` in binary mode,
    for streaming the file over an HTTP connection.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
411
411
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses redundant separators/'..'; pconvert then forces
    # forward slashes so entries compare equally across platforms.
    return util.pconvert(os.path.normpath(path))
415
415
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if 'largefiles' in repo.requirements:
        # Any store datafile living under the standin directory proves
        # at least one largefile has been committed.
        for entry in repo.store.datafiles():
            if shortnameslash in entry[0]:
                return True

    # Fall back to the largefiles dirstate: non-empty means largefiles
    # are tracked even though none are committed yet.
    return any(openlfdirstate(repo.ui, repo, False))
423
423
class storeprotonotcapable(Exception):
    '''Raised when no usable store matches the requested store types.

    NOTE(review): exact raising conditions live outside this file; the
    class itself only records the requested types.
    '''
    def __init__(self, storetypes):
        # store types the caller asked for, kept for the handler to inspect
        self.storetypes = storetypes
427
427
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked in
    the dirstate; hash is None when the standin cannot be read.'''
    results = []
    matcher = getstandinmatcher(repo)
    for fstandin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(fstandin)
        try:
            content = readstandin(repo, lfile)
        except IOError:
            # Missing/unreadable standin: record the file with no hash.
            content = None
        results.append((lfile, content))
    return results
439
439
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Copy the dirstate status of ``lfile``'s standin into lfdirstate.

    The standin's state in repo.dirstate ('n'/'m'/'r'/'a'/'?') is mapped
    onto the corresponding lfdirstate operation for the largefile itself.
    ``normallookup`` forces normallookup() even for a clean-looking 'n'
    entry, so the next status check re-examines the file.
    '''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # raw dirstate tuple: index 0 is the state byte, index 3 the mtime
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        # standin not tracked at all
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        # merged: must be re-checked on next status
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
462
462
def markcommitted(orig, ctx, node):
    '''Wrapper around committablectx.markcommitted: after the original
    runs, sync the lfdirstate for every standin touched by the commit
    and copy the committed largefiles into the local store cache.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files comming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
491
491
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the largefile names whose (name, hash) entry differs
    between two standin state lists (as built by getstandinsstate).

    The original deduplicated with ``f[0] not in filelist`` — an O(n)
    list scan per entry, O(n^2) overall; a companion ``seen`` set makes
    membership O(1) while preserving the same output order.
    '''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
499
499
def getlfilestoupload(repo, missing, addfunc):
    '''For every revision in ``missing``, find the standins it touches
    and call ``addfunc(standin_name, largefile_hash)`` for each.

    For merges, the plain files() list is augmented with files that
    differ from either parent, so largefiles introduced via the second
    parent are not missed.
    '''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
            unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        # Disable largefiles status machinery while looking up the
        # changectx, restoring the previous flag afterwards.
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # Merge commit: also consider files removed relative to
            # either parent or whose entry differs from a parent's.
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                # standin content is the largefile hash (plus newline)
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
531
531
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                # compute the standin name once; it is used twice below
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # largefile names themselves are excluded; their standins (and
        # any normal file the original matcher accepted) are included
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
633
633
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        # True only until the first __call__ after resuming
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match
654
654
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    usedefault = (forcibly is None
                  and util.safehasattr(repo, '_largefilesenabled'))
    if usedefault:
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status # forcibly WRITE OUT
    return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,1451 +1,1452 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial import (
17 from mercurial import (
18 archival,
18 archival,
19 cmdutil,
19 cmdutil,
20 error,
20 error,
21 hg,
21 hg,
22 match as matchmod,
22 match as matchmod,
23 pathutil,
23 pathutil,
24 registrar,
24 registrar,
25 scmutil,
25 scmutil,
26 smartset,
26 smartset,
27 util,
27 util,
28 )
28 )
29
29
30 from . import (
30 from . import (
31 lfcommands,
31 lfcommands,
32 lfutil,
32 lfutil,
33 storefactory,
33 storefactory,
34 )
34 )
35
35
36 # -- Utility functions: commonly/repeatedly needed functionality ---------------
36 # -- Utility functions: commonly/repeatedly needed functionality ---------------
37
37
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)
    # a file is "large" when its standin exists in the manifest
    lfile = lambda f: lfutil.standin(f) in manifest
    # narrow the copied matcher's internals to largefiles only
    m._files = filter(lfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m
49
49
def composenormalfilematcher(match, manifest, exclude=None):
    '''Create a matcher that matches only the non-largefiles in the
    original matcher, optionally also excluding the names in ``exclude``.'''
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)
    # reject standins, files whose standin is in the manifest, and
    # anything explicitly excluded
    notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
            manifest or f in excluded)
    m._files = filter(notlfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m
64
64
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        if opts is None:
            opts = {}
        # build the normal matcher, then strip largefiles from it
        match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)
    # installmatchfn returns the previous scmutil.match, which the
    # closure above captures as ``oldmatch``
    oldmatch = installmatchfn(overridematch)
74
74
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    oldmatch = scmutil.match
    # stash the replaced function on the new one so restorematchfn()
    # can unwind the patch later
    setattr(f, 'oldmatch', oldmatch)
    scmutil.match = f
    return oldmatch
82
82
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    # pops one level of wrapping via the 'oldmatch' attribute stashed
    # by installmatchfn
    scmutil.match = getattr(scmutil.match, 'oldmatch')
90
90
def installmatchandpatsfn(f):
    '''Monkey patch scmutil.matchandpats with ``f``; the replaced
    function is stashed on ``f.oldmatchandpats`` for later restore.
    Not thread safe (patches the module at runtime).'''
    oldmatchandpats = scmutil.matchandpats
    setattr(f, 'oldmatchandpats', oldmatchandpats)
    scmutil.matchandpats = f
    return oldmatchandpats
96
96
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    # the default (third getattr argument) makes this a no-op when the
    # function is not currently wrapped
    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
                                   scmutil.matchandpats)
106
106
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Add the files matched by ``matcher`` as largefiles.

    A file becomes a largefile when --large was given, when it exceeds
    the configured minimum size, or when it matches the configured
    largefiles patterns. Returns (added, bad): names added and names
    that failed to add.
    '''
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = matchmod.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in repo.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        # tracked as a largefile (its standin is in the working context)?
        lfile = lfutil.standin(f) in wctx
        # tracked as a normal file?
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                # write an empty-hash standin; the real hash is filled
                # in at commit time
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # names whose standin failed to add, mapped back to the
            # largefile name, but only when the user named them exactly
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    return added, bad
178
178
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Remove the largefiles matched by ``matcher``.

    With --after only already-deleted files are dropped; otherwise
    clean files are removed from the working directory as well, and
    modified/added files produce a warning instead. Returns a nonzero
    int when any file was warned about.
    '''
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        # enable largefiles-aware status for this one call
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # keep only files that really are largefiles (standin in manifest)
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if opts.get('dry_run'):
            return result

        # from here on operate on the standins instead of the largefiles
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()

    return result
246
246
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    """Map a standin path back to the largefile path it stands for.

    Returns *path* unchanged when it is not a standin (splitstandin
    returns a falsy value in that case).
    """
    return lfutil.splitstandin(path) or path
251
251
# -- Wrappers: modify existing commands --------------------------------
253
253
def overrideadd(orig, ui, repo, *pats, **opts):
    """Wrap 'hg add': reject the conflicting --normal/--large combination,
    then delegate to the original command."""
    if opts.get('normal') and opts.get('large'):
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
258
258
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    """Wrap cmdutil.add so that largefiles are added as standins.

    Large files are added first; the original add then runs with a
    matcher that excludes the files just added as largefiles.  Returns
    the combined list of files that could not be added.
    """
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    bad.extend(f for f in lbad)
    return bad
271
271
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    """Wrap cmdutil.remove: remove normal files via the original command
    (with largefiles filtered out of the matcher), then remove the
    largefiles themselves.  Non-zero result from either pass wins."""
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    return removelargefiles(ui, repo, False, matcher, after=after,
                            force=force) or result
277
277
def overridestatusfn(orig, repo, rev2, **opts):
    """Run a subrepo status with largefile status reporting enabled.

    The lfstatus flag is set on the underlying repo for the duration of
    the call and always cleared afterwards.
    """
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
284
284
def overridestatus(orig, ui, repo, *pats, **opts):
    """Run 'hg status' with largefile status reporting enabled.

    Sets repo.lfstatus around the original command and always restores
    it, even if the command raises.
    """
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
291
291
def overridedirty(orig, repo, ignoreupdate=False):
    """Run a subrepo dirty-check with largefile status enabled.

    Toggles lfstatus on the wrapped repo's underlying repository around
    the original call.
    """
    try:
        repo._repo.lfstatus = True
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
298
298
def overridelog(orig, ui, repo, *pats, **opts):
    """Wrap 'hg log' so that patterns also match the corresponding
    standin files under .hglf, while diffs are still computed against
    the original (non-standin) matcher."""
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
            default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            if pat.startswith('set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it.  This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin.  Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles.  The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # Accept either the largefile name or its standin.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # Always undo both monkey-patches, even if log raised.
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
407
407
def overrideverify(orig, ui, repo, *pats, **opts):
    """Wrap 'hg verify': after the normal verify, optionally verify
    largefiles when --large/--lfa/--lfc were given.  The largefile
    flags are popped so the original command never sees them."""
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result
417
417
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Wrap 'hg debugstate': with --large, show the largefiles dirstate
    instead of the normal one by handing the original command a fake
    repo whose dirstate is the lfdirstate."""
    large = opts.pop('large', False)
    if large:
        class fakerepo(object):
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
426
426
# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    """Treat a working-copy largefile as 'not unknown' during merge."""
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
441
441
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             acceptremote, *args, **kwargs):
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)

    # An overwriting update needs no largefile conflict resolution.
    if overwrite:
        return actions, diverge, renamedelete

    # Collect the largefile names involved: a file is of interest if
    # either it or its standin is present in p1.
    lfiles = set()
    for f in actions:
        splitstandin = lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            if sm == 'dc':
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            if lm == 'dc':
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete
535
535
def mergerecordupdates(orig, repo, actions, branchmerge):
    """Wrap merge.recordupdates to process 'lfmr' (largefile mark
    removed) actions emitted by overridecalculateupdates before the
    original bookkeeping runs."""
    if 'lfmr' in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions['lfmr']:
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            repo.dirstate.remove(lfile)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge)
548
548
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=None):
    # Non-standin files (and absent sides) use the normal file merge.
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=labels)

    # Standins contain the largefile hash; compare hashes, not contents.
    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
             (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        # Take the other side: write its standin into the working copy.
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False
571
571
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Wrap copies.pathcopies, translating standin paths on both sides
    of each copy record back to their largefile names."""
    copies = orig(ctx1, ctx2, match=match)
    updated = {}

    for k, v in copies.iteritems():
        updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v

    return updated
580
580
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile so that it checks
# whether the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
586 def overridecopy(orig, ui, repo, pats, opts, rename=False):
586 def overridecopy(orig, ui, repo, pats, opts, rename=False):
587 # doesn't remove largefile on rename
587 # doesn't remove largefile on rename
588 if len(pats) < 2:
588 if len(pats) < 2:
589 # this isn't legal, let the original function deal with it
589 # this isn't legal, let the original function deal with it
590 return orig(ui, repo, pats, opts, rename)
590 return orig(ui, repo, pats, opts, rename)
591
591
592 # This could copy both lfiles and normal files in one command,
592 # This could copy both lfiles and normal files in one command,
593 # but we don't want to do that. First replace their matcher to
593 # but we don't want to do that. First replace their matcher to
594 # only match normal files and run it, then replace it to just
594 # only match normal files and run it, then replace it to just
595 # match largefiles and run it again.
595 # match largefiles and run it again.
596 nonormalfiles = False
596 nonormalfiles = False
597 nolfiles = False
597 nolfiles = False
598 installnormalfilesmatchfn(repo[None].manifest())
598 installnormalfilesmatchfn(repo[None].manifest())
599 try:
599 try:
600 result = orig(ui, repo, pats, opts, rename)
600 result = orig(ui, repo, pats, opts, rename)
601 except error.Abort as e:
601 except error.Abort as e:
602 if str(e) != _('no files to copy'):
602 if str(e) != _('no files to copy'):
603 raise e
603 raise e
604 else:
604 else:
605 nonormalfiles = True
605 nonormalfiles = True
606 result = 0
606 result = 0
607 finally:
607 finally:
608 restorematchfn()
608 restorematchfn()
609
609
610 # The first rename can cause our current working directory to be removed.
610 # The first rename can cause our current working directory to be removed.
611 # In that case there is nothing left to copy/rename so just quit.
611 # In that case there is nothing left to copy/rename so just quit.
612 try:
612 try:
613 repo.getcwd()
613 repo.getcwd()
614 except OSError:
614 except OSError:
615 return result
615 return result
616
616
617 def makestandin(relpath):
617 def makestandin(relpath):
618 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
618 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
619 return repo.wvfs.join(lfutil.standin(path))
619 return repo.wvfs.join(lfutil.standin(path))
620
620
621 fullpats = scmutil.expandpats(pats)
621 fullpats = scmutil.expandpats(pats)
622 dest = fullpats[-1]
622 dest = fullpats[-1]
623
623
624 if os.path.isdir(dest):
624 if os.path.isdir(dest):
625 if not os.path.isdir(makestandin(dest)):
625 if not os.path.isdir(makestandin(dest)):
626 os.makedirs(makestandin(dest))
626 os.makedirs(makestandin(dest))
627
627
628 try:
628 try:
629 # When we call orig below it creates the standins but we don't add
629 # When we call orig below it creates the standins but we don't add
630 # them to the dir state until later so lock during that time.
630 # them to the dir state until later so lock during that time.
631 wlock = repo.wlock()
631 wlock = repo.wlock()
632
632
633 manifest = repo[None].manifest()
633 manifest = repo[None].manifest()
634 def overridematch(ctx, pats=(), opts=None, globbed=False,
634 def overridematch(ctx, pats=(), opts=None, globbed=False,
635 default='relpath', badfn=None):
635 default='relpath', badfn=None):
636 if opts is None:
636 if opts is None:
637 opts = {}
637 opts = {}
638 newpats = []
638 newpats = []
639 # The patterns were previously mangled to add the standin
639 # The patterns were previously mangled to add the standin
640 # directory; we need to remove that now
640 # directory; we need to remove that now
641 for pat in pats:
641 for pat in pats:
642 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
642 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
643 newpats.append(pat.replace(lfutil.shortname, ''))
643 newpats.append(pat.replace(lfutil.shortname, ''))
644 else:
644 else:
645 newpats.append(pat)
645 newpats.append(pat)
646 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
646 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
647 m = copy.copy(match)
647 m = copy.copy(match)
648 lfile = lambda f: lfutil.standin(f) in manifest
648 lfile = lambda f: lfutil.standin(f) in manifest
649 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
649 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
650 m._fileroots = set(m._files)
650 m._fileroots = set(m._files)
651 origmatchfn = m.matchfn
651 origmatchfn = m.matchfn
652 def matchfn(f):
652 def matchfn(f):
653 lfile = lfutil.splitstandin(f)
653 lfile = lfutil.splitstandin(f)
654 return (lfile is not None and
654 return (lfile is not None and
655 (f in manifest) and
655 (f in manifest) and
656 origmatchfn(lfile) or
656 origmatchfn(lfile) or
657 None)
657 None)
658 m.matchfn = matchfn
658 m.matchfn = matchfn
659 return m
659 return m
660 oldmatch = installmatchfn(overridematch)
660 oldmatch = installmatchfn(overridematch)
661 listpats = []
661 listpats = []
662 for pat in pats:
662 for pat in pats:
663 if matchmod.patkind(pat) is not None:
663 if matchmod.patkind(pat) is not None:
664 listpats.append(pat)
664 listpats.append(pat)
665 else:
665 else:
666 listpats.append(makestandin(pat))
666 listpats.append(makestandin(pat))
667
667
668 try:
668 try:
669 origcopyfile = util.copyfile
669 origcopyfile = util.copyfile
670 copiedfiles = []
670 copiedfiles = []
671 def overridecopyfile(src, dest):
671 def overridecopyfile(src, dest):
672 if (lfutil.shortname in src and
672 if (lfutil.shortname in src and
673 dest.startswith(repo.wjoin(lfutil.shortname))):
673 dest.startswith(repo.wjoin(lfutil.shortname))):
674 destlfile = dest.replace(lfutil.shortname, '')
674 destlfile = dest.replace(lfutil.shortname, '')
675 if not opts['force'] and os.path.exists(destlfile):
675 if not opts['force'] and os.path.exists(destlfile):
676 raise IOError('',
676 raise IOError('',
677 _('destination largefile already exists'))
677 _('destination largefile already exists'))
678 copiedfiles.append((src, dest))
678 copiedfiles.append((src, dest))
679 origcopyfile(src, dest)
679 origcopyfile(src, dest)
680
680
681 util.copyfile = overridecopyfile
681 util.copyfile = overridecopyfile
682 result += orig(ui, repo, listpats, opts, rename)
682 result += orig(ui, repo, listpats, opts, rename)
683 finally:
683 finally:
684 util.copyfile = origcopyfile
684 util.copyfile = origcopyfile
685
685
686 lfdirstate = lfutil.openlfdirstate(ui, repo)
686 lfdirstate = lfutil.openlfdirstate(ui, repo)
687 for (src, dest) in copiedfiles:
687 for (src, dest) in copiedfiles:
688 if (lfutil.shortname in src and
688 if (lfutil.shortname in src and
689 dest.startswith(repo.wjoin(lfutil.shortname))):
689 dest.startswith(repo.wjoin(lfutil.shortname))):
690 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
690 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
691 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
691 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
692 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
692 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
693 if not os.path.isdir(destlfiledir):
693 if not os.path.isdir(destlfiledir):
694 os.makedirs(destlfiledir)
694 os.makedirs(destlfiledir)
695 if rename:
695 if rename:
696 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
696 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
697
697
698 # The file is gone, but this deletes any empty parent
698 # The file is gone, but this deletes any empty parent
699 # directories as a side-effect.
699 # directories as a side-effect.
700 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
700 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
701 lfdirstate.remove(srclfile)
701 lfdirstate.remove(srclfile)
702 else:
702 else:
703 util.copyfile(repo.wjoin(srclfile),
703 util.copyfile(repo.wjoin(srclfile),
704 repo.wjoin(destlfile))
704 repo.wjoin(destlfile))
705
705
706 lfdirstate.add(destlfile)
706 lfdirstate.add(destlfile)
707 lfdirstate.write()
707 lfdirstate.write()
708 except error.Abort as e:
708 except error.Abort as e:
709 if str(e) != _('no files to copy'):
709 if str(e) != _('no files to copy'):
710 raise e
710 raise e
711 else:
711 else:
712 nolfiles = True
712 nolfiles = True
713 finally:
713 finally:
714 restorematchfn()
714 restorematchfn()
715 wlock.release()
715 wlock.release()
716
716
717 if nolfiles and nonormalfiles:
717 if nolfiles and nonormalfiles:
718 raise error.Abort(_('no files to copy'))
718 raise error.Abort(_('no files to copy'))
719
719
720 return result
720 return result
721
721
722 # When the user calls revert, we have to be careful to not revert any
722 # When the user calls revert, we have to be careful to not revert any
723 # changes to other largefiles accidentally. This means we have to keep
723 # changes to other largefiles accidentally. This means we have to keep
724 # track of the largefiles that are being reverted so we only pull down
724 # track of the largefiles that are being reverted so we only pull down
725 # the necessary largefiles.
725 # the necessary largefiles.
726 #
726 #
727 # Standins are only updated (to match the hash of largefiles) before
727 # Standins are only updated (to match the hash of largefiles) before
728 # commits. Update the standins then run the original revert, changing
728 # commits. Update the standins then run the original revert, changing
729 # the matcher to hit standins instead of largefiles. Based on the
729 # the matcher to hit standins instead of largefiles. Based on the
730 # resulting standins update the largefiles.
730 # resulting standins update the largefiles.
731 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
731 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
732 # Because we put the standins in a bad state (by updating them)
732 # Because we put the standins in a bad state (by updating them)
733 # and then return them to a correct state we need to lock to
733 # and then return them to a correct state we need to lock to
734 # prevent others from changing them in their incorrect state.
734 # prevent others from changing them in their incorrect state.
735 with repo.wlock():
735 with repo.wlock():
736 lfdirstate = lfutil.openlfdirstate(ui, repo)
736 lfdirstate = lfutil.openlfdirstate(ui, repo)
737 s = lfutil.lfdirstatestatus(lfdirstate, repo)
737 s = lfutil.lfdirstatestatus(lfdirstate, repo)
738 lfdirstate.write()
738 lfdirstate.write()
739 for lfile in s.modified:
739 for lfile in s.modified:
740 lfutil.updatestandin(repo, lfutil.standin(lfile))
740 lfutil.updatestandin(repo, lfutil.standin(lfile))
741 for lfile in s.deleted:
741 for lfile in s.deleted:
742 if (repo.wvfs.exists(lfutil.standin(lfile))):
742 fstandin = lfutil.standin(lfile)
743 repo.wvfs.unlink(lfutil.standin(lfile))
743 if (repo.wvfs.exists(fstandin)):
744 repo.wvfs.unlink(fstandin)
744
745
745 oldstandins = lfutil.getstandinsstate(repo)
746 oldstandins = lfutil.getstandinsstate(repo)
746
747
747 def overridematch(mctx, pats=(), opts=None, globbed=False,
748 def overridematch(mctx, pats=(), opts=None, globbed=False,
748 default='relpath', badfn=None):
749 default='relpath', badfn=None):
749 if opts is None:
750 if opts is None:
750 opts = {}
751 opts = {}
751 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
752 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
752 m = copy.copy(match)
753 m = copy.copy(match)
753
754
754 # revert supports recursing into subrepos, and though largefiles
755 # revert supports recursing into subrepos, and though largefiles
755 # currently doesn't work correctly in that case, this match is
756 # currently doesn't work correctly in that case, this match is
756 # called, so the lfdirstate above may not be the correct one for
757 # called, so the lfdirstate above may not be the correct one for
757 # this invocation of match.
758 # this invocation of match.
758 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
759 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
759 False)
760 False)
760
761
761 def tostandin(f):
762 def tostandin(f):
762 standin = lfutil.standin(f)
763 standin = lfutil.standin(f)
763 if standin in ctx or standin in mctx:
764 if standin in ctx or standin in mctx:
764 return standin
765 return standin
765 elif standin in repo[None] or lfdirstate[f] == 'r':
766 elif standin in repo[None] or lfdirstate[f] == 'r':
766 return None
767 return None
767 return f
768 return f
768 m._files = [tostandin(f) for f in m._files]
769 m._files = [tostandin(f) for f in m._files]
769 m._files = [f for f in m._files if f is not None]
770 m._files = [f for f in m._files if f is not None]
770 m._fileroots = set(m._files)
771 m._fileroots = set(m._files)
771 origmatchfn = m.matchfn
772 origmatchfn = m.matchfn
772 def matchfn(f):
773 def matchfn(f):
773 lfile = lfutil.splitstandin(f)
774 lfile = lfutil.splitstandin(f)
774 if lfile is not None:
775 if lfile is not None:
775 return (origmatchfn(lfile) and
776 return (origmatchfn(lfile) and
776 (f in ctx or f in mctx))
777 (f in ctx or f in mctx))
777 return origmatchfn(f)
778 return origmatchfn(f)
778 m.matchfn = matchfn
779 m.matchfn = matchfn
779 return m
780 return m
780 oldmatch = installmatchfn(overridematch)
781 oldmatch = installmatchfn(overridematch)
781 try:
782 try:
782 orig(ui, repo, ctx, parents, *pats, **opts)
783 orig(ui, repo, ctx, parents, *pats, **opts)
783 finally:
784 finally:
784 restorematchfn()
785 restorematchfn()
785
786
786 newstandins = lfutil.getstandinsstate(repo)
787 newstandins = lfutil.getstandinsstate(repo)
787 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
788 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
788 # lfdirstate should be 'normallookup'-ed for updated files,
789 # lfdirstate should be 'normallookup'-ed for updated files,
789 # because reverting doesn't touch dirstate for 'normal' files
790 # because reverting doesn't touch dirstate for 'normal' files
790 # when target revision is explicitly specified: in such case,
791 # when target revision is explicitly specified: in such case,
791 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
792 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
792 # of target (standin) file.
793 # of target (standin) file.
793 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
794 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
794 normallookup=True)
795 normallookup=True)
795
796
796 # after pulling changesets, we need to take some extra care to get
797 # after pulling changesets, we need to take some extra care to get
797 # largefiles updated remotely
798 # largefiles updated remotely
798 def overridepull(orig, ui, repo, source=None, **opts):
799 def overridepull(orig, ui, repo, source=None, **opts):
799 revsprepull = len(repo)
800 revsprepull = len(repo)
800 if not source:
801 if not source:
801 source = 'default'
802 source = 'default'
802 repo.lfpullsource = source
803 repo.lfpullsource = source
803 result = orig(ui, repo, source, **opts)
804 result = orig(ui, repo, source, **opts)
804 revspostpull = len(repo)
805 revspostpull = len(repo)
805 lfrevs = opts.get('lfrev', [])
806 lfrevs = opts.get('lfrev', [])
806 if opts.get('all_largefiles'):
807 if opts.get('all_largefiles'):
807 lfrevs.append('pulled()')
808 lfrevs.append('pulled()')
808 if lfrevs and revspostpull > revsprepull:
809 if lfrevs and revspostpull > revsprepull:
809 numcached = 0
810 numcached = 0
810 repo.firstpulled = revsprepull # for pulled() revset expression
811 repo.firstpulled = revsprepull # for pulled() revset expression
811 try:
812 try:
812 for rev in scmutil.revrange(repo, lfrevs):
813 for rev in scmutil.revrange(repo, lfrevs):
813 ui.note(_('pulling largefiles for revision %s\n') % rev)
814 ui.note(_('pulling largefiles for revision %s\n') % rev)
814 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
815 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
815 numcached += len(cached)
816 numcached += len(cached)
816 finally:
817 finally:
817 del repo.firstpulled
818 del repo.firstpulled
818 ui.status(_("%d largefiles cached\n") % numcached)
819 ui.status(_("%d largefiles cached\n") % numcached)
819 return result
820 return result
820
821
821 def overridepush(orig, ui, repo, *args, **kwargs):
822 def overridepush(orig, ui, repo, *args, **kwargs):
822 """Override push command and store --lfrev parameters in opargs"""
823 """Override push command and store --lfrev parameters in opargs"""
823 lfrevs = kwargs.pop('lfrev', None)
824 lfrevs = kwargs.pop('lfrev', None)
824 if lfrevs:
825 if lfrevs:
825 opargs = kwargs.setdefault('opargs', {})
826 opargs = kwargs.setdefault('opargs', {})
826 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
827 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
827 return orig(ui, repo, *args, **kwargs)
828 return orig(ui, repo, *args, **kwargs)
828
829
829 def exchangepushoperation(orig, *args, **kwargs):
830 def exchangepushoperation(orig, *args, **kwargs):
830 """Override pushoperation constructor and store lfrevs parameter"""
831 """Override pushoperation constructor and store lfrevs parameter"""
831 lfrevs = kwargs.pop('lfrevs', None)
832 lfrevs = kwargs.pop('lfrevs', None)
832 pushop = orig(*args, **kwargs)
833 pushop = orig(*args, **kwargs)
833 pushop.lfrevs = lfrevs
834 pushop.lfrevs = lfrevs
834 return pushop
835 return pushop
835
836
836 revsetpredicate = registrar.revsetpredicate()
837 revsetpredicate = registrar.revsetpredicate()
837
838
838 @revsetpredicate('pulled()')
839 @revsetpredicate('pulled()')
839 def pulledrevsetsymbol(repo, subset, x):
840 def pulledrevsetsymbol(repo, subset, x):
840 """Changesets that just has been pulled.
841 """Changesets that just has been pulled.
841
842
842 Only available with largefiles from pull --lfrev expressions.
843 Only available with largefiles from pull --lfrev expressions.
843
844
844 .. container:: verbose
845 .. container:: verbose
845
846
846 Some examples:
847 Some examples:
847
848
848 - pull largefiles for all new changesets::
849 - pull largefiles for all new changesets::
849
850
850 hg pull -lfrev "pulled()"
851 hg pull -lfrev "pulled()"
851
852
852 - pull largefiles for all new branch heads::
853 - pull largefiles for all new branch heads::
853
854
854 hg pull -lfrev "head(pulled()) and not closed()"
855 hg pull -lfrev "head(pulled()) and not closed()"
855
856
856 """
857 """
857
858
858 try:
859 try:
859 firstpulled = repo.firstpulled
860 firstpulled = repo.firstpulled
860 except AttributeError:
861 except AttributeError:
861 raise error.Abort(_("pulled() only available in --lfrev"))
862 raise error.Abort(_("pulled() only available in --lfrev"))
862 return smartset.baseset([r for r in subset if r >= firstpulled])
863 return smartset.baseset([r for r in subset if r >= firstpulled])
863
864
864 def overrideclone(orig, ui, source, dest=None, **opts):
865 def overrideclone(orig, ui, source, dest=None, **opts):
865 d = dest
866 d = dest
866 if d is None:
867 if d is None:
867 d = hg.defaultdest(source)
868 d = hg.defaultdest(source)
868 if opts.get('all_largefiles') and not hg.islocal(d):
869 if opts.get('all_largefiles') and not hg.islocal(d):
869 raise error.Abort(_(
870 raise error.Abort(_(
870 '--all-largefiles is incompatible with non-local destination %s') %
871 '--all-largefiles is incompatible with non-local destination %s') %
871 d)
872 d)
872
873
873 return orig(ui, source, dest, **opts)
874 return orig(ui, source, dest, **opts)
874
875
875 def hgclone(orig, ui, opts, *args, **kwargs):
876 def hgclone(orig, ui, opts, *args, **kwargs):
876 result = orig(ui, opts, *args, **kwargs)
877 result = orig(ui, opts, *args, **kwargs)
877
878
878 if result is not None:
879 if result is not None:
879 sourcerepo, destrepo = result
880 sourcerepo, destrepo = result
880 repo = destrepo.local()
881 repo = destrepo.local()
881
882
882 # When cloning to a remote repo (like through SSH), no repo is available
883 # When cloning to a remote repo (like through SSH), no repo is available
883 # from the peer. Therefore the largefiles can't be downloaded and the
884 # from the peer. Therefore the largefiles can't be downloaded and the
884 # hgrc can't be updated.
885 # hgrc can't be updated.
885 if not repo:
886 if not repo:
886 return result
887 return result
887
888
888 # If largefiles is required for this repo, permanently enable it locally
889 # If largefiles is required for this repo, permanently enable it locally
889 if 'largefiles' in repo.requirements:
890 if 'largefiles' in repo.requirements:
890 with repo.vfs('hgrc', 'a', text=True) as fp:
891 with repo.vfs('hgrc', 'a', text=True) as fp:
891 fp.write('\n[extensions]\nlargefiles=\n')
892 fp.write('\n[extensions]\nlargefiles=\n')
892
893
893 # Caching is implicitly limited to 'rev' option, since the dest repo was
894 # Caching is implicitly limited to 'rev' option, since the dest repo was
894 # truncated at that point. The user may expect a download count with
895 # truncated at that point. The user may expect a download count with
895 # this option, so attempt whether or not this is a largefile repo.
896 # this option, so attempt whether or not this is a largefile repo.
896 if opts.get('all_largefiles'):
897 if opts.get('all_largefiles'):
897 success, missing = lfcommands.downloadlfiles(ui, repo, None)
898 success, missing = lfcommands.downloadlfiles(ui, repo, None)
898
899
899 if missing != 0:
900 if missing != 0:
900 return None
901 return None
901
902
902 return result
903 return result
903
904
904 def overriderebase(orig, ui, repo, **opts):
905 def overriderebase(orig, ui, repo, **opts):
905 if not util.safehasattr(repo, '_largefilesenabled'):
906 if not util.safehasattr(repo, '_largefilesenabled'):
906 return orig(ui, repo, **opts)
907 return orig(ui, repo, **opts)
907
908
908 resuming = opts.get('continue')
909 resuming = opts.get('continue')
909 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
910 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
910 repo._lfstatuswriters.append(lambda *msg, **opts: None)
911 repo._lfstatuswriters.append(lambda *msg, **opts: None)
911 try:
912 try:
912 return orig(ui, repo, **opts)
913 return orig(ui, repo, **opts)
913 finally:
914 finally:
914 repo._lfstatuswriters.pop()
915 repo._lfstatuswriters.pop()
915 repo._lfcommithooks.pop()
916 repo._lfcommithooks.pop()
916
917
917 def overridearchivecmd(orig, ui, repo, dest, **opts):
918 def overridearchivecmd(orig, ui, repo, dest, **opts):
918 repo.unfiltered().lfstatus = True
919 repo.unfiltered().lfstatus = True
919
920
920 try:
921 try:
921 return orig(ui, repo.unfiltered(), dest, **opts)
922 return orig(ui, repo.unfiltered(), dest, **opts)
922 finally:
923 finally:
923 repo.unfiltered().lfstatus = False
924 repo.unfiltered().lfstatus = False
924
925
925 def hgwebarchive(orig, web, req, tmpl):
926 def hgwebarchive(orig, web, req, tmpl):
926 web.repo.lfstatus = True
927 web.repo.lfstatus = True
927
928
928 try:
929 try:
929 return orig(web, req, tmpl)
930 return orig(web, req, tmpl)
930 finally:
931 finally:
931 web.repo.lfstatus = False
932 web.repo.lfstatus = False
932
933
933 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
934 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
934 prefix='', mtime=None, subrepos=None):
935 prefix='', mtime=None, subrepos=None):
935 # For some reason setting repo.lfstatus in hgwebarchive only changes the
936 # For some reason setting repo.lfstatus in hgwebarchive only changes the
936 # unfiltered repo's attr, so check that as well.
937 # unfiltered repo's attr, so check that as well.
937 if not repo.lfstatus and not repo.unfiltered().lfstatus:
938 if not repo.lfstatus and not repo.unfiltered().lfstatus:
938 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
939 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
939 subrepos)
940 subrepos)
940
941
941 # No need to lock because we are only reading history and
942 # No need to lock because we are only reading history and
942 # largefile caches, neither of which are modified.
943 # largefile caches, neither of which are modified.
943 if node is not None:
944 if node is not None:
944 lfcommands.cachelfiles(repo.ui, repo, node)
945 lfcommands.cachelfiles(repo.ui, repo, node)
945
946
946 if kind not in archival.archivers:
947 if kind not in archival.archivers:
947 raise error.Abort(_("unknown archive type '%s'") % kind)
948 raise error.Abort(_("unknown archive type '%s'") % kind)
948
949
949 ctx = repo[node]
950 ctx = repo[node]
950
951
951 if kind == 'files':
952 if kind == 'files':
952 if prefix:
953 if prefix:
953 raise error.Abort(
954 raise error.Abort(
954 _('cannot give prefix when archiving to files'))
955 _('cannot give prefix when archiving to files'))
955 else:
956 else:
956 prefix = archival.tidyprefix(dest, kind, prefix)
957 prefix = archival.tidyprefix(dest, kind, prefix)
957
958
958 def write(name, mode, islink, getdata):
959 def write(name, mode, islink, getdata):
959 if matchfn and not matchfn(name):
960 if matchfn and not matchfn(name):
960 return
961 return
961 data = getdata()
962 data = getdata()
962 if decode:
963 if decode:
963 data = repo.wwritedata(name, data)
964 data = repo.wwritedata(name, data)
964 archiver.addfile(prefix + name, mode, islink, data)
965 archiver.addfile(prefix + name, mode, islink, data)
965
966
966 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
967 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
967
968
968 if repo.ui.configbool("ui", "archivemeta", True):
969 if repo.ui.configbool("ui", "archivemeta", True):
969 write('.hg_archival.txt', 0o644, False,
970 write('.hg_archival.txt', 0o644, False,
970 lambda: archival.buildmetadata(ctx))
971 lambda: archival.buildmetadata(ctx))
971
972
972 for f in ctx:
973 for f in ctx:
973 ff = ctx.flags(f)
974 ff = ctx.flags(f)
974 getdata = ctx[f].data
975 getdata = ctx[f].data
975 lfile = lfutil.splitstandin(f)
976 lfile = lfutil.splitstandin(f)
976 if lfile is not None:
977 if lfile is not None:
977 if node is not None:
978 if node is not None:
978 path = lfutil.findfile(repo, getdata().strip())
979 path = lfutil.findfile(repo, getdata().strip())
979
980
980 if path is None:
981 if path is None:
981 raise error.Abort(
982 raise error.Abort(
982 _('largefile %s not found in repo store or system cache')
983 _('largefile %s not found in repo store or system cache')
983 % lfile)
984 % lfile)
984 else:
985 else:
985 path = lfile
986 path = lfile
986
987
987 f = lfile
988 f = lfile
988
989
989 getdata = lambda: util.readfile(path)
990 getdata = lambda: util.readfile(path)
990 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
991 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
991
992
992 if subrepos:
993 if subrepos:
993 for subpath in sorted(ctx.substate):
994 for subpath in sorted(ctx.substate):
994 sub = ctx.workingsub(subpath)
995 sub = ctx.workingsub(subpath)
995 submatch = matchmod.subdirmatcher(subpath, matchfn)
996 submatch = matchmod.subdirmatcher(subpath, matchfn)
996 sub._repo.lfstatus = True
997 sub._repo.lfstatus = True
997 sub.archive(archiver, prefix, submatch)
998 sub.archive(archiver, prefix, submatch)
998
999
999 archiver.done()
1000 archiver.done()
1000
1001
1001 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1002 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1002 if not repo._repo.lfstatus:
1003 if not repo._repo.lfstatus:
1003 return orig(repo, archiver, prefix, match, decode)
1004 return orig(repo, archiver, prefix, match, decode)
1004
1005
1005 repo._get(repo._state + ('hg',))
1006 repo._get(repo._state + ('hg',))
1006 rev = repo._state[1]
1007 rev = repo._state[1]
1007 ctx = repo._repo[rev]
1008 ctx = repo._repo[rev]
1008
1009
1009 if ctx.node() is not None:
1010 if ctx.node() is not None:
1010 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1011 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1011
1012
1012 def write(name, mode, islink, getdata):
1013 def write(name, mode, islink, getdata):
1013 # At this point, the standin has been replaced with the largefile name,
1014 # At this point, the standin has been replaced with the largefile name,
1014 # so the normal matcher works here without the lfutil variants.
1015 # so the normal matcher works here without the lfutil variants.
1015 if match and not match(f):
1016 if match and not match(f):
1016 return
1017 return
1017 data = getdata()
1018 data = getdata()
1018 if decode:
1019 if decode:
1019 data = repo._repo.wwritedata(name, data)
1020 data = repo._repo.wwritedata(name, data)
1020
1021
1021 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1022 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1022
1023
1023 for f in ctx:
1024 for f in ctx:
1024 ff = ctx.flags(f)
1025 ff = ctx.flags(f)
1025 getdata = ctx[f].data
1026 getdata = ctx[f].data
1026 lfile = lfutil.splitstandin(f)
1027 lfile = lfutil.splitstandin(f)
1027 if lfile is not None:
1028 if lfile is not None:
1028 if ctx.node() is not None:
1029 if ctx.node() is not None:
1029 path = lfutil.findfile(repo._repo, getdata().strip())
1030 path = lfutil.findfile(repo._repo, getdata().strip())
1030
1031
1031 if path is None:
1032 if path is None:
1032 raise error.Abort(
1033 raise error.Abort(
1033 _('largefile %s not found in repo store or system cache')
1034 _('largefile %s not found in repo store or system cache')
1034 % lfile)
1035 % lfile)
1035 else:
1036 else:
1036 path = lfile
1037 path = lfile
1037
1038
1038 f = lfile
1039 f = lfile
1039
1040
1040 getdata = lambda: util.readfile(os.path.join(prefix, path))
1041 getdata = lambda: util.readfile(os.path.join(prefix, path))
1041
1042
1042 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1043 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1043
1044
1044 for subpath in sorted(ctx.substate):
1045 for subpath in sorted(ctx.substate):
1045 sub = ctx.workingsub(subpath)
1046 sub = ctx.workingsub(subpath)
1046 submatch = matchmod.subdirmatcher(subpath, match)
1047 submatch = matchmod.subdirmatcher(subpath, match)
1047 sub._repo.lfstatus = True
1048 sub._repo.lfstatus = True
1048 sub.archive(archiver, prefix + repo._path + '/', submatch, decode)
1049 sub.archive(archiver, prefix + repo._path + '/', submatch, decode)
1049
1050
1050 # If a largefile is modified, the change is not reflected in its
1051 # If a largefile is modified, the change is not reflected in its
1051 # standin until a commit. cmdutil.bailifchanged() raises an exception
1052 # standin until a commit. cmdutil.bailifchanged() raises an exception
1052 # if the repo has uncommitted changes. Wrap it to also check if
1053 # if the repo has uncommitted changes. Wrap it to also check if
1053 # largefiles were changed. This is used by bisect, backout and fetch.
1054 # largefiles were changed. This is used by bisect, backout and fetch.
1054 def overridebailifchanged(orig, repo, *args, **kwargs):
1055 def overridebailifchanged(orig, repo, *args, **kwargs):
1055 orig(repo, *args, **kwargs)
1056 orig(repo, *args, **kwargs)
1056 repo.lfstatus = True
1057 repo.lfstatus = True
1057 s = repo.status()
1058 s = repo.status()
1058 repo.lfstatus = False
1059 repo.lfstatus = False
1059 if s.modified or s.added or s.removed or s.deleted:
1060 if s.modified or s.added or s.removed or s.deleted:
1060 raise error.Abort(_('uncommitted changes'))
1061 raise error.Abort(_('uncommitted changes'))
1061
1062
1062 def postcommitstatus(orig, repo, *args, **kwargs):
1063 def postcommitstatus(orig, repo, *args, **kwargs):
1063 repo.lfstatus = True
1064 repo.lfstatus = True
1064 try:
1065 try:
1065 return orig(repo, *args, **kwargs)
1066 return orig(repo, *args, **kwargs)
1066 finally:
1067 finally:
1067 repo.lfstatus = False
1068 repo.lfstatus = False
1068
1069
1069 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1070 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1070 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1071 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1071 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1072 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1072 m = composelargefilematcher(match, repo[None].manifest())
1073 m = composelargefilematcher(match, repo[None].manifest())
1073
1074
1074 try:
1075 try:
1075 repo.lfstatus = True
1076 repo.lfstatus = True
1076 s = repo.status(match=m, clean=True)
1077 s = repo.status(match=m, clean=True)
1077 finally:
1078 finally:
1078 repo.lfstatus = False
1079 repo.lfstatus = False
1079 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1080 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1080 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1081 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1081
1082
1082 for f in forget:
1083 for f in forget:
1083 if lfutil.standin(f) not in repo.dirstate and not \
1084 fstandin = lfutil.standin(f)
1084 repo.wvfs.isdir(lfutil.standin(f)):
1085 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1085 ui.warn(_('not removing %s: file is already untracked\n')
1086 ui.warn(_('not removing %s: file is already untracked\n')
1086 % m.rel(f))
1087 % m.rel(f))
1087 bad.append(f)
1088 bad.append(f)
1088
1089
1089 for f in forget:
1090 for f in forget:
1090 if ui.verbose or not m.exact(f):
1091 if ui.verbose or not m.exact(f):
1091 ui.status(_('removing %s\n') % m.rel(f))
1092 ui.status(_('removing %s\n') % m.rel(f))
1092
1093
1093 # Need to lock because standin files are deleted then removed from the
1094 # Need to lock because standin files are deleted then removed from the
1094 # repository and we could race in-between.
1095 # repository and we could race in-between.
1095 with repo.wlock():
1096 with repo.wlock():
1096 lfdirstate = lfutil.openlfdirstate(ui, repo)
1097 lfdirstate = lfutil.openlfdirstate(ui, repo)
1097 for f in forget:
1098 for f in forget:
1098 if lfdirstate[f] == 'a':
1099 if lfdirstate[f] == 'a':
1099 lfdirstate.drop(f)
1100 lfdirstate.drop(f)
1100 else:
1101 else:
1101 lfdirstate.remove(f)
1102 lfdirstate.remove(f)
1102 lfdirstate.write()
1103 lfdirstate.write()
1103 standins = [lfutil.standin(f) for f in forget]
1104 standins = [lfutil.standin(f) for f in forget]
1104 for f in standins:
1105 for f in standins:
1105 repo.wvfs.unlinkpath(f, ignoremissing=True)
1106 repo.wvfs.unlinkpath(f, ignoremissing=True)
1106 rejected = repo[None].forget(standins)
1107 rejected = repo[None].forget(standins)
1107
1108
1108 bad.extend(f for f in rejected if f in m.files())
1109 bad.extend(f for f in rejected if f in m.files())
1109 forgot.extend(f for f in forget if f not in rejected)
1110 forgot.extend(f for f in forget if f not in rejected)
1110 return bad, forgot
1111 return bad, forgot
1111
1112
def _getoutgoings(repo, other, missing, addfunc):
    """Get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    seenpairs = set()
    hashes = set()
    def collect(fn, lfhash):
        # record each (filename, hash) pair only once
        pair = (fn, lfhash)
        if pair in seenpairs:
            return
        seenpairs.add(pair)
        hashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, collect)
    if not hashes:
        return
    # probe the remote store once for the whole hash set
    lfexists = storefactory.openstore(repo, other).exists(hashes)
    for fn, lfhash in seenpairs:
        if not lfexists[lfhash]: # lfhash doesn't exist on "other"
            addfunc(fn, lfhash)
1134
1135
def outgoinghook(ui, repo, other, opts, missing):
    # Hook for "hg outgoing --large": report which largefile entities
    # would have to be uploaded to 'other' for the revisions in 'missing'.
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            # with --debug, remember every hash per filename so the
            # individual entity hashes can be listed after each file
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            # without --debug, only filenames are shown
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                # 'toupload' holds standin names; show the largefile name
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1166
1167
def summaryremotehook(ui, repo, opts, changes):
    """Hook for "hg summary --large": report outgoing largefile entities."""
    wantlarge = opts.get('large', False)
    if changes is None:
        # probing phase: report whether the (incoming, outgoing)
        # information needs to be computed at all
        return (False, bool(wantlarge))
    if not wantlarge:
        return
    url, branch, peer, outgoing = changes[1]
    if peer is None:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no remote repo)\n'))
        return

    filenames = set()
    entityhashes = set()
    def addfunc(fn, lfhash):
        filenames.add(fn)
        entityhashes.add(lfhash)
    _getoutgoings(repo, peer, outgoing.missing, addfunc)

    if filenames:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: %d entities for %d files to upload\n')
                  % (len(entityhashes), len(filenames)))
    else:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no files to upload)\n'))
1195
1196
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run "hg summary" with largefile status reporting enabled."""
    repo.lfstatus = True
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        # always reset the flag so later operations see normal status
        repo.lfstatus = False
1202
1203
def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    """Wrap scmutil.addremove() so largefiles are added/removed properly.

    Largefile removals and additions are handled by the largefiles
    machinery first; the wrapped addremove then only sees normal files.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1237
1238
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Wrap the purge command so largefiles are not reported as unknown.

    While the wrapped command runs, repo.status() is monkey patched to
    filter out files tracked by the largefiles dirstate, so that
    "purge --all" does not delete them.
    """
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # only report files the largefiles dirstate knows nothing about
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    try:
        orig(ui, repo, *dirs, **opts)
    finally:
        # restore the original status method even if purge raises, so the
        # repo object is not left with the patched method afterwards
        repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    """Wrap rollback to restore standins and resync the lfdirstate."""
    with repo.wlock():
        before = repo.dirstate.parents()
        # standins tracked before the rollback; any that are still not
        # tracked afterwards are orphans and must be deleted
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore standin content from the rolled-back parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # bring the largefiles dirstate back in sync with the rolled-back
        # standins, dropping entries for largefiles that no longer exist
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
        return result
1300
1301
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run transplant with automated largefile commit hooks installed."""
    def quietwriter(*msg, **kwargs):
        # suppress largefile status messages while transplanting
        pass
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(quietwriter)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1311
1312
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """Wrap "hg cat" so largefile contents are written out too.

    Patterns naming a largefile are resolved through its standin; the
    largefile data is fetched from a store when not cached locally.

    Returns 0 if at least one file was written, 1 otherwise.
    """
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        # suppress "no such file" complaints for largefile names that
        # were matched through their standins
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        # always descend into the standin directory
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get('decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                hash = lfutil.readstandin(repo, lf, ctx)
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _('largefile %s is not in cache and could not be '
                              'downloaded') % lf)
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, "rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1371
1372
def mergeupdate(orig, repo, node, branchmerge, force,
                *args, **kwargs):
    """Wrap merge.update() to keep largefiles and standins in sync."""
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(matchmod.always(repo.root,
                                                      repo.getcwd()),
                                      [], False, True, False)
        oldclean = set(s.clean)
        pctx = repo['.']
        # refresh standins of possibly-modified largefiles before the
        # update, so their state is committed to the right revision
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, pctx)):
                oldclean.add(lfile)
        for lfile in s.added:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.normallookup(lfile)
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.normal(lfile)
        lfdirstate.write()

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

    return result
1438
1439
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """Wrap scmutil.marktouched() to refresh touched largefiles."""
    result = orig(repo, files, *args, **kwargs)

    # collect the largefile names behind any touched standins
    touched = [lf for lf in map(lfutil.splitstandin, files)
               if lf is not None]
    if touched:
        lfcommands.updatelfiles(repo.ui, repo, filelist=touched,
                                printmessage=False, normallookup=True)

    return result
General Comments 0
You need to be logged in to leave comments. Login now