largefiles: make storefactory._openstore public...
liscju
r29355:85868ecf default
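This changeset renames the store-factory entry point from the private storefactory._openstore to the public storefactory.openstore; the hunks shown here only update the call sites in the largefiles extension. A minimal sketch of the two call shapes as they read after the change (arguments copied verbatim from the diff below):

    # upload/push path: open a store that largefiles can be put into
    store = storefactory.openstore(rsrc, rdst, put=True)
    # read path (verify, cache): open a store for the local repository only
    store = storefactory.openstore(repo)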
hgext/largefiles/lfcommands.py
@@ -1,569 +1,569 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import shutil
15 import shutil
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 from mercurial import (
19 from mercurial import (
20 cmdutil,
20 cmdutil,
21 commands,
21 commands,
22 context,
22 context,
23 error,
23 error,
24 hg,
24 hg,
25 lock,
25 lock,
26 match as matchmod,
26 match as matchmod,
27 node,
27 node,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 )
30 )
31
31
32 from ..convert import (
32 from ..convert import (
33 convcmd,
33 convcmd,
34 filemap,
34 filemap,
35 )
35 )
36
36
37 from . import (
37 from . import (
38 lfutil,
38 lfutil,
39 storefactory
39 storefactory
40 )
40 )
41
41
42 release = lock.release
42 release = lock.release
43
43
44 # -- Commands ----------------------------------------------------------
44 # -- Commands ----------------------------------------------------------
45
45
46 cmdtable = {}
46 cmdtable = {}
47 command = cmdutil.command(cmdtable)
47 command = cmdutil.command(cmdtable)
48
48
49 @command('lfconvert',
49 @command('lfconvert',
50 [('s', 'size', '',
50 [('s', 'size', '',
51 _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
51 _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
52 ('', 'to-normal', False,
52 ('', 'to-normal', False,
53 _('convert from a largefiles repo to a normal repo')),
53 _('convert from a largefiles repo to a normal repo')),
54 ],
54 ],
55 _('hg lfconvert SOURCE DEST [FILE ...]'),
55 _('hg lfconvert SOURCE DEST [FILE ...]'),
56 norepo=True,
56 norepo=True,
57 inferrepo=True)
57 inferrepo=True)
58 def lfconvert(ui, src, dest, *pats, **opts):
58 def lfconvert(ui, src, dest, *pats, **opts):
59 '''convert a normal repository to a largefiles repository
59 '''convert a normal repository to a largefiles repository
60
60
61 Convert repository SOURCE to a new repository DEST, identical to
61 Convert repository SOURCE to a new repository DEST, identical to
62 SOURCE except that certain files will be converted as largefiles:
62 SOURCE except that certain files will be converted as largefiles:
63 specifically, any file that matches any PATTERN *or* whose size is
63 specifically, any file that matches any PATTERN *or* whose size is
64 above the minimum size threshold is converted as a largefile. The
64 above the minimum size threshold is converted as a largefile. The
65 size used to determine whether or not to track a file as a
65 size used to determine whether or not to track a file as a
66 largefile is the size of the first version of the file. The
66 largefile is the size of the first version of the file. The
67 minimum size can be specified either with --size or in
67 minimum size can be specified either with --size or in
68 configuration as ``largefiles.size``.
68 configuration as ``largefiles.size``.
69
69
70 After running this command you will need to make sure that
70 After running this command you will need to make sure that
71 largefiles is enabled anywhere you intend to push the new
71 largefiles is enabled anywhere you intend to push the new
72 repository.
72 repository.
73
73
74 Use --to-normal to convert largefiles back to normal files; after
74 Use --to-normal to convert largefiles back to normal files; after
75 this, the DEST repository can be used without largefiles at all.'''
75 this, the DEST repository can be used without largefiles at all.'''
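    # Usage sketch (hypothetical repository paths, not part of the original
    # source), following the synopsis and docstring above:
    #   hg lfconvert --size 10 oldrepo newrepo        # files of 10 MB or more become largefiles
    #   hg lfconvert --to-normal bigrepo plainrepo    # convert largefiles back to normal files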
76
76
77 if opts['to_normal']:
77 if opts['to_normal']:
78 tolfile = False
78 tolfile = False
79 else:
79 else:
80 tolfile = True
80 tolfile = True
81 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
81 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
82
82
83 if not hg.islocal(src):
83 if not hg.islocal(src):
84 raise error.Abort(_('%s is not a local Mercurial repo') % src)
84 raise error.Abort(_('%s is not a local Mercurial repo') % src)
85 if not hg.islocal(dest):
85 if not hg.islocal(dest):
86 raise error.Abort(_('%s is not a local Mercurial repo') % dest)
86 raise error.Abort(_('%s is not a local Mercurial repo') % dest)
87
87
88 rsrc = hg.repository(ui, src)
88 rsrc = hg.repository(ui, src)
89 ui.status(_('initializing destination %s\n') % dest)
89 ui.status(_('initializing destination %s\n') % dest)
90 rdst = hg.repository(ui, dest, create=True)
90 rdst = hg.repository(ui, dest, create=True)
91
91
92 success = False
92 success = False
93 dstwlock = dstlock = None
93 dstwlock = dstlock = None
94 try:
94 try:
95 # Get a list of all changesets in the source. The easy way to do this
95 # Get a list of all changesets in the source. The easy way to do this
96 # is to simply walk the changelog, using changelog.nodesbetween().
96 # is to simply walk the changelog, using changelog.nodesbetween().
97 # Take a look at mercurial/revlog.py:639 for more details.
97 # Take a look at mercurial/revlog.py:639 for more details.
98 # Use a generator instead of a list to decrease memory usage
98 # Use a generator instead of a list to decrease memory usage
99 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
99 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
100 rsrc.heads())[0])
100 rsrc.heads())[0])
101 revmap = {node.nullid: node.nullid}
101 revmap = {node.nullid: node.nullid}
102 if tolfile:
102 if tolfile:
103 # Lock destination to prevent modification while it is converted to.
103 # Lock destination to prevent modification while it is converted to.
104 # Don't need to lock src because we are just reading from its
104 # Don't need to lock src because we are just reading from its
105 # history which can't change.
105 # history which can't change.
106 dstwlock = rdst.wlock()
106 dstwlock = rdst.wlock()
107 dstlock = rdst.lock()
107 dstlock = rdst.lock()
108
108
109 lfiles = set()
109 lfiles = set()
110 normalfiles = set()
110 normalfiles = set()
111 if not pats:
111 if not pats:
112 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
112 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
113 if pats:
113 if pats:
114 matcher = matchmod.match(rsrc.root, '', list(pats))
114 matcher = matchmod.match(rsrc.root, '', list(pats))
115 else:
115 else:
116 matcher = None
116 matcher = None
117
117
118 lfiletohash = {}
118 lfiletohash = {}
119 for ctx in ctxs:
119 for ctx in ctxs:
120 ui.progress(_('converting revisions'), ctx.rev(),
120 ui.progress(_('converting revisions'), ctx.rev(),
121 unit=_('revisions'), total=rsrc['tip'].rev())
121 unit=_('revisions'), total=rsrc['tip'].rev())
122 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
122 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
123 lfiles, normalfiles, matcher, size, lfiletohash)
123 lfiles, normalfiles, matcher, size, lfiletohash)
124 ui.progress(_('converting revisions'), None)
124 ui.progress(_('converting revisions'), None)
125
125
126 if rdst.wvfs.exists(lfutil.shortname):
126 if rdst.wvfs.exists(lfutil.shortname):
127 rdst.wvfs.rmtree(lfutil.shortname)
127 rdst.wvfs.rmtree(lfutil.shortname)
128
128
129 for f in lfiletohash.keys():
129 for f in lfiletohash.keys():
130 if rdst.wvfs.isfile(f):
130 if rdst.wvfs.isfile(f):
131 rdst.wvfs.unlink(f)
131 rdst.wvfs.unlink(f)
132 try:
132 try:
133 rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
133 rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
134 except OSError:
134 except OSError:
135 pass
135 pass
136
136
137 # If there were any files converted to largefiles, add largefiles
137 # If there were any files converted to largefiles, add largefiles
138 # to the destination repository's requirements.
138 # to the destination repository's requirements.
139 if lfiles:
139 if lfiles:
140 rdst.requirements.add('largefiles')
140 rdst.requirements.add('largefiles')
141 rdst._writerequirements()
141 rdst._writerequirements()
142 else:
142 else:
143 class lfsource(filemap.filemap_source):
143 class lfsource(filemap.filemap_source):
144 def __init__(self, ui, source):
144 def __init__(self, ui, source):
145 super(lfsource, self).__init__(ui, source, None)
145 super(lfsource, self).__init__(ui, source, None)
146 self.filemapper.rename[lfutil.shortname] = '.'
146 self.filemapper.rename[lfutil.shortname] = '.'
147
147
148 def getfile(self, name, rev):
148 def getfile(self, name, rev):
149 realname, realrev = rev
149 realname, realrev = rev
150 f = super(lfsource, self).getfile(name, rev)
150 f = super(lfsource, self).getfile(name, rev)
151
151
152 if (not realname.startswith(lfutil.shortnameslash)
152 if (not realname.startswith(lfutil.shortnameslash)
153 or f[0] is None):
153 or f[0] is None):
154 return f
154 return f
155
155
156 # Substitute in the largefile data for the hash
156 # Substitute in the largefile data for the hash
157 hash = f[0].strip()
157 hash = f[0].strip()
158 path = lfutil.findfile(rsrc, hash)
158 path = lfutil.findfile(rsrc, hash)
159
159
160 if path is None:
160 if path is None:
161 raise error.Abort(_("missing largefile for '%s' in %s")
161 raise error.Abort(_("missing largefile for '%s' in %s")
162 % (realname, realrev))
162 % (realname, realrev))
163 return util.readfile(path), f[1]
163 return util.readfile(path), f[1]
164
164
165 class converter(convcmd.converter):
165 class converter(convcmd.converter):
166 def __init__(self, ui, source, dest, revmapfile, opts):
166 def __init__(self, ui, source, dest, revmapfile, opts):
167 src = lfsource(ui, source)
167 src = lfsource(ui, source)
168
168
169 super(converter, self).__init__(ui, src, dest, revmapfile,
169 super(converter, self).__init__(ui, src, dest, revmapfile,
170 opts)
170 opts)
171
171
172 found, missing = downloadlfiles(ui, rsrc)
172 found, missing = downloadlfiles(ui, rsrc)
173 if missing != 0:
173 if missing != 0:
174 raise error.Abort(_("all largefiles must be present locally"))
174 raise error.Abort(_("all largefiles must be present locally"))
175
175
176 orig = convcmd.converter
176 orig = convcmd.converter
177 convcmd.converter = converter
177 convcmd.converter = converter
178
178
179 try:
179 try:
180 convcmd.convert(ui, src, dest)
180 convcmd.convert(ui, src, dest)
181 finally:
181 finally:
182 convcmd.converter = orig
182 convcmd.converter = orig
183 success = True
183 success = True
184 finally:
184 finally:
185 if tolfile:
185 if tolfile:
186 rdst.dirstate.clear()
186 rdst.dirstate.clear()
187 release(dstlock, dstwlock)
187 release(dstlock, dstwlock)
188 if not success:
188 if not success:
189 # we failed, remove the new directory
189 # we failed, remove the new directory
190 shutil.rmtree(rdst.root)
190 shutil.rmtree(rdst.root)
191
191
192 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
192 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
193 matcher, size, lfiletohash):
193 matcher, size, lfiletohash):
194 # Convert src parents to dst parents
194 # Convert src parents to dst parents
195 parents = _convertparents(ctx, revmap)
195 parents = _convertparents(ctx, revmap)
196
196
197 # Generate list of changed files
197 # Generate list of changed files
198 files = _getchangedfiles(ctx, parents)
198 files = _getchangedfiles(ctx, parents)
199
199
200 dstfiles = []
200 dstfiles = []
201 for f in files:
201 for f in files:
202 if f not in lfiles and f not in normalfiles:
202 if f not in lfiles and f not in normalfiles:
203 islfile = _islfile(f, ctx, matcher, size)
203 islfile = _islfile(f, ctx, matcher, size)
204 # If this file was renamed or copied then copy
204 # If this file was renamed or copied then copy
205 # the largefile-ness of its predecessor
205 # the largefile-ness of its predecessor
206 if f in ctx.manifest():
206 if f in ctx.manifest():
207 fctx = ctx.filectx(f)
207 fctx = ctx.filectx(f)
208 renamed = fctx.renamed()
208 renamed = fctx.renamed()
209 renamedlfile = renamed and renamed[0] in lfiles
209 renamedlfile = renamed and renamed[0] in lfiles
210 islfile |= renamedlfile
210 islfile |= renamedlfile
211 if 'l' in fctx.flags():
211 if 'l' in fctx.flags():
212 if renamedlfile:
212 if renamedlfile:
213 raise error.Abort(
213 raise error.Abort(
214 _('renamed/copied largefile %s becomes symlink')
214 _('renamed/copied largefile %s becomes symlink')
215 % f)
215 % f)
216 islfile = False
216 islfile = False
217 if islfile:
217 if islfile:
218 lfiles.add(f)
218 lfiles.add(f)
219 else:
219 else:
220 normalfiles.add(f)
220 normalfiles.add(f)
221
221
222 if f in lfiles:
222 if f in lfiles:
223 dstfiles.append(lfutil.standin(f))
223 dstfiles.append(lfutil.standin(f))
224 # largefile in manifest if it has not been removed/renamed
224 # largefile in manifest if it has not been removed/renamed
225 if f in ctx.manifest():
225 if f in ctx.manifest():
226 fctx = ctx.filectx(f)
226 fctx = ctx.filectx(f)
227 if 'l' in fctx.flags():
227 if 'l' in fctx.flags():
228 renamed = fctx.renamed()
228 renamed = fctx.renamed()
229 if renamed and renamed[0] in lfiles:
229 if renamed and renamed[0] in lfiles:
230 raise error.Abort(_('largefile %s becomes symlink') % f)
230 raise error.Abort(_('largefile %s becomes symlink') % f)
231
231
232 # largefile was modified, update standins
232 # largefile was modified, update standins
233 m = hashlib.sha1('')
233 m = hashlib.sha1('')
234 m.update(ctx[f].data())
234 m.update(ctx[f].data())
235 hash = m.hexdigest()
235 hash = m.hexdigest()
236 if f not in lfiletohash or lfiletohash[f] != hash:
236 if f not in lfiletohash or lfiletohash[f] != hash:
237 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
237 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
238 executable = 'x' in ctx[f].flags()
238 executable = 'x' in ctx[f].flags()
239 lfutil.writestandin(rdst, lfutil.standin(f), hash,
239 lfutil.writestandin(rdst, lfutil.standin(f), hash,
240 executable)
240 executable)
241 lfiletohash[f] = hash
241 lfiletohash[f] = hash
242 else:
242 else:
243 # normal file
243 # normal file
244 dstfiles.append(f)
244 dstfiles.append(f)
245
245
246 def getfilectx(repo, memctx, f):
246 def getfilectx(repo, memctx, f):
247 if lfutil.isstandin(f):
247 if lfutil.isstandin(f):
248 # if the file isn't in the manifest then it was removed
248 # if the file isn't in the manifest then it was removed
249 # or renamed, raise IOError to indicate this
249 # or renamed, raise IOError to indicate this
250 srcfname = lfutil.splitstandin(f)
250 srcfname = lfutil.splitstandin(f)
251 try:
251 try:
252 fctx = ctx.filectx(srcfname)
252 fctx = ctx.filectx(srcfname)
253 except error.LookupError:
253 except error.LookupError:
254 return None
254 return None
255 renamed = fctx.renamed()
255 renamed = fctx.renamed()
256 if renamed:
256 if renamed:
257 # standin is always a largefile because largefile-ness
257 # standin is always a largefile because largefile-ness
258 # doesn't change after rename or copy
258 # doesn't change after rename or copy
259 renamed = lfutil.standin(renamed[0])
259 renamed = lfutil.standin(renamed[0])
260
260
261 return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
261 return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
262 'l' in fctx.flags(), 'x' in fctx.flags(),
262 'l' in fctx.flags(), 'x' in fctx.flags(),
263 renamed)
263 renamed)
264 else:
264 else:
265 return _getnormalcontext(repo, ctx, f, revmap)
265 return _getnormalcontext(repo, ctx, f, revmap)
266
266
267 # Commit
267 # Commit
268 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
268 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
269
269
270 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
270 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
271 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
271 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
272 getfilectx, ctx.user(), ctx.date(), ctx.extra())
272 getfilectx, ctx.user(), ctx.date(), ctx.extra())
273 ret = rdst.commitctx(mctx)
273 ret = rdst.commitctx(mctx)
274 lfutil.copyalltostore(rdst, ret)
274 lfutil.copyalltostore(rdst, ret)
275 rdst.setparents(ret)
275 rdst.setparents(ret)
276 revmap[ctx.node()] = rdst.changelog.tip()
276 revmap[ctx.node()] = rdst.changelog.tip()
277
277
278 # Generate list of changed files
278 # Generate list of changed files
279 def _getchangedfiles(ctx, parents):
279 def _getchangedfiles(ctx, parents):
280 files = set(ctx.files())
280 files = set(ctx.files())
281 if node.nullid not in parents:
281 if node.nullid not in parents:
282 mc = ctx.manifest()
282 mc = ctx.manifest()
283 mp1 = ctx.parents()[0].manifest()
283 mp1 = ctx.parents()[0].manifest()
284 mp2 = ctx.parents()[1].manifest()
284 mp2 = ctx.parents()[1].manifest()
285 files |= (set(mp1) | set(mp2)) - set(mc)
285 files |= (set(mp1) | set(mp2)) - set(mc)
286 for f in mc:
286 for f in mc:
287 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
287 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
288 files.add(f)
288 files.add(f)
289 return files
289 return files
290
290
291 # Convert src parents to dst parents
291 # Convert src parents to dst parents
292 def _convertparents(ctx, revmap):
292 def _convertparents(ctx, revmap):
293 parents = []
293 parents = []
294 for p in ctx.parents():
294 for p in ctx.parents():
295 parents.append(revmap[p.node()])
295 parents.append(revmap[p.node()])
296 while len(parents) < 2:
296 while len(parents) < 2:
297 parents.append(node.nullid)
297 parents.append(node.nullid)
298 return parents
298 return parents
299
299
300 # Get memfilectx for a normal file
300 # Get memfilectx for a normal file
301 def _getnormalcontext(repo, ctx, f, revmap):
301 def _getnormalcontext(repo, ctx, f, revmap):
302 try:
302 try:
303 fctx = ctx.filectx(f)
303 fctx = ctx.filectx(f)
304 except error.LookupError:
304 except error.LookupError:
305 return None
305 return None
306 renamed = fctx.renamed()
306 renamed = fctx.renamed()
307 if renamed:
307 if renamed:
308 renamed = renamed[0]
308 renamed = renamed[0]
309
309
310 data = fctx.data()
310 data = fctx.data()
311 if f == '.hgtags':
311 if f == '.hgtags':
312 data = _converttags (repo.ui, revmap, data)
312 data = _converttags (repo.ui, revmap, data)
313 return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
313 return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
314 'x' in fctx.flags(), renamed)
314 'x' in fctx.flags(), renamed)
315
315
316 # Remap tag data using a revision map
316 # Remap tag data using a revision map
317 def _converttags(ui, revmap, data):
317 def _converttags(ui, revmap, data):
318 newdata = []
318 newdata = []
319 for line in data.splitlines():
319 for line in data.splitlines():
320 try:
320 try:
321 id, name = line.split(' ', 1)
321 id, name = line.split(' ', 1)
322 except ValueError:
322 except ValueError:
323 ui.warn(_('skipping incorrectly formatted tag %s\n')
323 ui.warn(_('skipping incorrectly formatted tag %s\n')
324 % line)
324 % line)
325 continue
325 continue
326 try:
326 try:
327 newid = node.bin(id)
327 newid = node.bin(id)
328 except TypeError:
328 except TypeError:
329 ui.warn(_('skipping incorrectly formatted id %s\n')
329 ui.warn(_('skipping incorrectly formatted id %s\n')
330 % id)
330 % id)
331 continue
331 continue
332 try:
332 try:
333 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
333 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
334 name))
334 name))
335 except KeyError:
335 except KeyError:
336 ui.warn(_('no mapping for id %s\n') % id)
336 ui.warn(_('no mapping for id %s\n') % id)
337 continue
337 continue
338 return ''.join(newdata)
338 return ''.join(newdata)
339
339
340 def _islfile(file, ctx, matcher, size):
340 def _islfile(file, ctx, matcher, size):
341 '''Return true if file should be considered a largefile, i.e.
341 '''Return true if file should be considered a largefile, i.e.
342 matcher matches it or it is larger than size.'''
342 matcher matches it or it is larger than size.'''
343 # never store special .hg* files as largefiles
343 # never store special .hg* files as largefiles
344 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
344 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
345 return False
345 return False
346 if matcher and matcher(file):
346 if matcher and matcher(file):
347 return True
347 return True
348 try:
348 try:
349 return ctx.filectx(file).size() >= size * 1024 * 1024
349 return ctx.filectx(file).size() >= size * 1024 * 1024
350 except error.LookupError:
350 except error.LookupError:
351 return False
351 return False
352
352
353 def uploadlfiles(ui, rsrc, rdst, files):
353 def uploadlfiles(ui, rsrc, rdst, files):
354 '''upload largefiles to the central store'''
354 '''upload largefiles to the central store'''
355
355
356 if not files:
356 if not files:
357 return
357 return
358
358
359 store = storefactory._openstore(rsrc, rdst, put=True)
359 store = storefactory.openstore(rsrc, rdst, put=True)
360
360
361 at = 0
361 at = 0
362 ui.debug("sending statlfile command for %d largefiles\n" % len(files))
362 ui.debug("sending statlfile command for %d largefiles\n" % len(files))
363 retval = store.exists(files)
363 retval = store.exists(files)
364 files = filter(lambda h: not retval[h], files)
364 files = filter(lambda h: not retval[h], files)
365 ui.debug("%d largefiles need to be uploaded\n" % len(files))
365 ui.debug("%d largefiles need to be uploaded\n" % len(files))
366
366
367 for hash in files:
367 for hash in files:
368 ui.progress(_('uploading largefiles'), at, unit=_('files'),
368 ui.progress(_('uploading largefiles'), at, unit=_('files'),
369 total=len(files))
369 total=len(files))
370 source = lfutil.findfile(rsrc, hash)
370 source = lfutil.findfile(rsrc, hash)
371 if not source:
371 if not source:
372 raise error.Abort(_('largefile %s missing from store'
372 raise error.Abort(_('largefile %s missing from store'
373 ' (needs to be uploaded)') % hash)
373 ' (needs to be uploaded)') % hash)
374 # XXX check for errors here
374 # XXX check for errors here
375 store.put(source, hash)
375 store.put(source, hash)
376 at += 1
376 at += 1
377 ui.progress(_('uploading largefiles'), None)
377 ui.progress(_('uploading largefiles'), None)
378
378
379 def verifylfiles(ui, repo, all=False, contents=False):
379 def verifylfiles(ui, repo, all=False, contents=False):
380 '''Verify that every largefile revision in the current changeset
380 '''Verify that every largefile revision in the current changeset
381 exists in the central store. With --contents, also verify that
381 exists in the central store. With --contents, also verify that
382 the contents of each local largefile file revision are correct (SHA-1 hash
382 the contents of each local largefile file revision are correct (SHA-1 hash
383 matches the revision ID). With --all, check every changeset in
383 matches the revision ID). With --all, check every changeset in
384 this repository.'''
384 this repository.'''
385 if all:
385 if all:
386 revs = repo.revs('all()')
386 revs = repo.revs('all()')
387 else:
387 else:
388 revs = ['.']
388 revs = ['.']
389
389
390 store = storefactory._openstore(repo)
390 store = storefactory.openstore(repo)
391 return store.verify(revs, contents=contents)
391 return store.verify(revs, contents=contents)
392
392
393 def cachelfiles(ui, repo, node, filelist=None):
393 def cachelfiles(ui, repo, node, filelist=None):
394 '''cachelfiles ensures that all largefiles needed by the specified revision
394 '''cachelfiles ensures that all largefiles needed by the specified revision
395 are present in the repository's largefile cache.
395 are present in the repository's largefile cache.
396
396
397 returns a tuple (cached, missing). cached is the list of files downloaded
397 returns a tuple (cached, missing). cached is the list of files downloaded
398 by this operation; missing is the list of files that were needed but could
398 by this operation; missing is the list of files that were needed but could
399 not be found.'''
399 not be found.'''
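    # Contract sketch (hypothetical file name, not in the original source):
    # a caller such as lfpull below does
    #   cached, missing = cachelfiles(ui, repo, rev)
    # and may get (['big.bin'], []) when every needed largefile was fetched,
    # with anything unavailable listed in missing instead.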
400 lfiles = lfutil.listlfiles(repo, node)
400 lfiles = lfutil.listlfiles(repo, node)
401 if filelist:
401 if filelist:
402 lfiles = set(lfiles) & set(filelist)
402 lfiles = set(lfiles) & set(filelist)
403 toget = []
403 toget = []
404
404
405 for lfile in lfiles:
405 for lfile in lfiles:
406 try:
406 try:
407 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
407 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
408 except IOError as err:
408 except IOError as err:
409 if err.errno == errno.ENOENT:
409 if err.errno == errno.ENOENT:
410 continue # node must be None and standin wasn't found in wctx
410 continue # node must be None and standin wasn't found in wctx
411 raise
411 raise
412 if not lfutil.findfile(repo, expectedhash):
412 if not lfutil.findfile(repo, expectedhash):
413 toget.append((lfile, expectedhash))
413 toget.append((lfile, expectedhash))
414
414
415 if toget:
415 if toget:
416 store = storefactory._openstore(repo)
416 store = storefactory.openstore(repo)
417 ret = store.get(toget)
417 ret = store.get(toget)
418 return ret
418 return ret
419
419
420 return ([], [])
420 return ([], [])
421
421
422 def downloadlfiles(ui, repo, rev=None):
422 def downloadlfiles(ui, repo, rev=None):
423 matchfn = scmutil.match(repo[None],
423 matchfn = scmutil.match(repo[None],
424 [repo.wjoin(lfutil.shortname)], {})
424 [repo.wjoin(lfutil.shortname)], {})
425 def prepare(ctx, fns):
425 def prepare(ctx, fns):
426 pass
426 pass
427 totalsuccess = 0
427 totalsuccess = 0
428 totalmissing = 0
428 totalmissing = 0
429 if rev != []: # walkchangerevs on empty list would return all revs
429 if rev != []: # walkchangerevs on empty list would return all revs
430 for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
430 for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
431 prepare):
431 prepare):
432 success, missing = cachelfiles(ui, repo, ctx.node())
432 success, missing = cachelfiles(ui, repo, ctx.node())
433 totalsuccess += len(success)
433 totalsuccess += len(success)
434 totalmissing += len(missing)
434 totalmissing += len(missing)
435 ui.status(_("%d additional largefiles cached\n") % totalsuccess)
435 ui.status(_("%d additional largefiles cached\n") % totalsuccess)
436 if totalmissing > 0:
436 if totalmissing > 0:
437 ui.status(_("%d largefiles failed to download\n") % totalmissing)
437 ui.status(_("%d largefiles failed to download\n") % totalmissing)
438 return totalsuccess, totalmissing
438 return totalsuccess, totalmissing
439
439
440 def updatelfiles(ui, repo, filelist=None, printmessage=None,
440 def updatelfiles(ui, repo, filelist=None, printmessage=None,
441 normallookup=False):
441 normallookup=False):
442 '''Update largefiles according to standins in the working directory
442 '''Update largefiles according to standins in the working directory
443
443
444 If ``printmessage`` is other than ``None``, it means "print (or
444 If ``printmessage`` is other than ``None``, it means "print (or
445 ignore, for false) message forcibly".
445 ignore, for false) message forcibly".
446 '''
446 '''
447 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
447 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
448 with repo.wlock():
448 with repo.wlock():
449 lfdirstate = lfutil.openlfdirstate(ui, repo)
449 lfdirstate = lfutil.openlfdirstate(ui, repo)
450 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
450 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
451
451
452 if filelist is not None:
452 if filelist is not None:
453 filelist = set(filelist)
453 filelist = set(filelist)
454 lfiles = [f for f in lfiles if f in filelist]
454 lfiles = [f for f in lfiles if f in filelist]
455
455
456 update = {}
456 update = {}
457 updated, removed = 0, 0
457 updated, removed = 0, 0
458 wvfs = repo.wvfs
458 wvfs = repo.wvfs
459 for lfile in lfiles:
459 for lfile in lfiles:
460 rellfile = lfile
460 rellfile = lfile
461 rellfileorig = os.path.relpath(
461 rellfileorig = os.path.relpath(
462 scmutil.origpath(ui, repo, wvfs.join(rellfile)),
462 scmutil.origpath(ui, repo, wvfs.join(rellfile)),
463 start=repo.root)
463 start=repo.root)
464 relstandin = lfutil.standin(lfile)
464 relstandin = lfutil.standin(lfile)
465 relstandinorig = os.path.relpath(
465 relstandinorig = os.path.relpath(
466 scmutil.origpath(ui, repo, wvfs.join(relstandin)),
466 scmutil.origpath(ui, repo, wvfs.join(relstandin)),
467 start=repo.root)
467 start=repo.root)
468 if wvfs.exists(relstandin):
468 if wvfs.exists(relstandin):
469 if (wvfs.exists(relstandinorig) and
469 if (wvfs.exists(relstandinorig) and
470 wvfs.exists(rellfile)):
470 wvfs.exists(rellfile)):
471 shutil.copyfile(wvfs.join(rellfile),
471 shutil.copyfile(wvfs.join(rellfile),
472 wvfs.join(rellfileorig))
472 wvfs.join(rellfileorig))
473 wvfs.unlinkpath(relstandinorig)
473 wvfs.unlinkpath(relstandinorig)
474 expecthash = lfutil.readstandin(repo, lfile)
474 expecthash = lfutil.readstandin(repo, lfile)
475 if expecthash != '':
475 if expecthash != '':
476 if lfile not in repo[None]: # not switched to normal file
476 if lfile not in repo[None]: # not switched to normal file
477 wvfs.unlinkpath(rellfile, ignoremissing=True)
477 wvfs.unlinkpath(rellfile, ignoremissing=True)
478 # use normallookup() to allocate an entry in largefiles
478 # use normallookup() to allocate an entry in largefiles
479 # dirstate to prevent lfilesrepo.status() from reporting
479 # dirstate to prevent lfilesrepo.status() from reporting
480 # missing files as removed.
480 # missing files as removed.
481 lfdirstate.normallookup(lfile)
481 lfdirstate.normallookup(lfile)
482 update[lfile] = expecthash
482 update[lfile] = expecthash
483 else:
483 else:
484 # Remove lfiles for which the standin is deleted, unless the
484 # Remove lfiles for which the standin is deleted, unless the
485 # lfile is added to the repository again. This happens when a
485 # lfile is added to the repository again. This happens when a
486 # largefile is converted back to a normal file: the standin
486 # largefile is converted back to a normal file: the standin
487 # disappears, but a new (normal) file appears as the lfile.
487 # disappears, but a new (normal) file appears as the lfile.
488 if (wvfs.exists(rellfile) and
488 if (wvfs.exists(rellfile) and
489 repo.dirstate.normalize(lfile) not in repo[None]):
489 repo.dirstate.normalize(lfile) not in repo[None]):
490 wvfs.unlinkpath(rellfile)
490 wvfs.unlinkpath(rellfile)
491 removed += 1
491 removed += 1
492
492
493 # largefile processing might be slow and be interrupted - be prepared
493 # largefile processing might be slow and be interrupted - be prepared
494 lfdirstate.write()
494 lfdirstate.write()
495
495
496 if lfiles:
496 if lfiles:
497 statuswriter(_('getting changed largefiles\n'))
497 statuswriter(_('getting changed largefiles\n'))
498 cachelfiles(ui, repo, None, lfiles)
498 cachelfiles(ui, repo, None, lfiles)
499
499
500 for lfile in lfiles:
500 for lfile in lfiles:
501 update1 = 0
501 update1 = 0
502
502
503 expecthash = update.get(lfile)
503 expecthash = update.get(lfile)
504 if expecthash:
504 if expecthash:
505 if not lfutil.copyfromcache(repo, expecthash, lfile):
505 if not lfutil.copyfromcache(repo, expecthash, lfile):
506 # failed ... but already removed and set to normallookup
506 # failed ... but already removed and set to normallookup
507 continue
507 continue
508 # Synchronize largefile dirstate to the last modified
508 # Synchronize largefile dirstate to the last modified
509 # time of the file
509 # time of the file
510 lfdirstate.normal(lfile)
510 lfdirstate.normal(lfile)
511 update1 = 1
511 update1 = 1
512
512
513 # copy the state of largefile standin from the repository's
513 # copy the state of largefile standin from the repository's
514 # dirstate to its state in the lfdirstate.
514 # dirstate to its state in the lfdirstate.
515 rellfile = lfile
515 rellfile = lfile
516 relstandin = lfutil.standin(lfile)
516 relstandin = lfutil.standin(lfile)
517 if wvfs.exists(relstandin):
517 if wvfs.exists(relstandin):
518 mode = wvfs.stat(relstandin).st_mode
518 mode = wvfs.stat(relstandin).st_mode
519 if mode != wvfs.stat(rellfile).st_mode:
519 if mode != wvfs.stat(rellfile).st_mode:
520 wvfs.chmod(rellfile, mode)
520 wvfs.chmod(rellfile, mode)
521 update1 = 1
521 update1 = 1
522
522
523 updated += update1
523 updated += update1
524
524
525 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
525 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
526
526
527 lfdirstate.write()
527 lfdirstate.write()
528 if lfiles:
528 if lfiles:
529 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
529 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
530 removed))
530 removed))
531
531
532 @command('lfpull',
532 @command('lfpull',
533 [('r', 'rev', [], _('pull largefiles for these revisions'))
533 [('r', 'rev', [], _('pull largefiles for these revisions'))
534 ] + commands.remoteopts,
534 ] + commands.remoteopts,
535 _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
535 _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
536 def lfpull(ui, repo, source="default", **opts):
536 def lfpull(ui, repo, source="default", **opts):
537 """pull largefiles for the specified revisions from the specified source
537 """pull largefiles for the specified revisions from the specified source
538
538
539 Pull largefiles that are referenced from local changesets but missing
539 Pull largefiles that are referenced from local changesets but missing
540 locally, pulling from a remote repository to the local cache.
540 locally, pulling from a remote repository to the local cache.
541
541
542 If SOURCE is omitted, the 'default' path will be used.
542 If SOURCE is omitted, the 'default' path will be used.
543 See :hg:`help urls` for more information.
543 See :hg:`help urls` for more information.
544
544
545 .. container:: verbose
545 .. container:: verbose
546
546
547 Some examples:
547 Some examples:
548
548
549 - pull largefiles for all branch heads::
549 - pull largefiles for all branch heads::
550
550
551 hg lfpull -r "head() and not closed()"
551 hg lfpull -r "head() and not closed()"
552
552
553 - pull largefiles on the default branch::
553 - pull largefiles on the default branch::
554
554
555 hg lfpull -r "branch(default)"
555 hg lfpull -r "branch(default)"
556 """
556 """
557 repo.lfpullsource = source
557 repo.lfpullsource = source
558
558
559 revs = opts.get('rev', [])
559 revs = opts.get('rev', [])
560 if not revs:
560 if not revs:
561 raise error.Abort(_('no revisions specified'))
561 raise error.Abort(_('no revisions specified'))
562 revs = scmutil.revrange(repo, revs)
562 revs = scmutil.revrange(repo, revs)
563
563
564 numcached = 0
564 numcached = 0
565 for rev in revs:
565 for rev in revs:
566 ui.note(_('pulling largefiles for revision %s\n') % rev)
566 ui.note(_('pulling largefiles for revision %s\n') % rev)
567 (cached, missing) = cachelfiles(ui, repo, rev)
567 (cached, missing) = cachelfiles(ui, repo, rev)
568 numcached += len(cached)
568 numcached += len(cached)
569 ui.status(_("%d largefiles cached\n") % numcached)
569 ui.status(_("%d largefiles cached\n") % numcached)
hgext/largefiles/overrides.py
@@ -1,1433 +1,1433 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial import (
17 from mercurial import (
18 archival,
18 archival,
19 cmdutil,
19 cmdutil,
20 error,
20 error,
21 hg,
21 hg,
22 match as matchmod,
22 match as matchmod,
23 pathutil,
23 pathutil,
24 registrar,
24 registrar,
25 revset,
25 revset,
26 scmutil,
26 scmutil,
27 util,
27 util,
28 )
28 )
29
29
30 from . import (
30 from . import (
31 lfcommands,
31 lfcommands,
32 lfutil,
32 lfutil,
33 storefactory,
33 storefactory,
34 )
34 )
35
35
36 # -- Utility functions: commonly/repeatedly needed functionality ---------------
36 # -- Utility functions: commonly/repeatedly needed functionality ---------------
37
37
38 def composelargefilematcher(match, manifest):
38 def composelargefilematcher(match, manifest):
39 '''create a matcher that matches only the largefiles in the original
39 '''create a matcher that matches only the largefiles in the original
40 matcher'''
40 matcher'''
41 m = copy.copy(match)
41 m = copy.copy(match)
42 lfile = lambda f: lfutil.standin(f) in manifest
42 lfile = lambda f: lfutil.standin(f) in manifest
43 m._files = filter(lfile, m._files)
43 m._files = filter(lfile, m._files)
44 m._fileroots = set(m._files)
44 m._fileroots = set(m._files)
45 m._always = False
45 m._always = False
46 origmatchfn = m.matchfn
46 origmatchfn = m.matchfn
47 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
47 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
48 return m
48 return m
49
49
50 def composenormalfilematcher(match, manifest, exclude=None):
50 def composenormalfilematcher(match, manifest, exclude=None):
51 excluded = set()
51 excluded = set()
52 if exclude is not None:
52 if exclude is not None:
53 excluded.update(exclude)
53 excluded.update(exclude)
54
54
55 m = copy.copy(match)
55 m = copy.copy(match)
56 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
56 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
57 manifest or f in excluded)
57 manifest or f in excluded)
58 m._files = filter(notlfile, m._files)
58 m._files = filter(notlfile, m._files)
59 m._fileroots = set(m._files)
59 m._fileroots = set(m._files)
60 m._always = False
60 m._always = False
61 origmatchfn = m.matchfn
61 origmatchfn = m.matchfn
62 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
62 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
63 return m
63 return m
64
64
65 def installnormalfilesmatchfn(manifest):
65 def installnormalfilesmatchfn(manifest):
66 '''installmatchfn with a matchfn that ignores all largefiles'''
66 '''installmatchfn with a matchfn that ignores all largefiles'''
67 def overridematch(ctx, pats=(), opts=None, globbed=False,
67 def overridematch(ctx, pats=(), opts=None, globbed=False,
68 default='relpath', badfn=None):
68 default='relpath', badfn=None):
69 if opts is None:
69 if opts is None:
70 opts = {}
70 opts = {}
71 match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
71 match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
72 return composenormalfilematcher(match, manifest)
72 return composenormalfilematcher(match, manifest)
73 oldmatch = installmatchfn(overridematch)
73 oldmatch = installmatchfn(overridematch)
74
74
75 def installmatchfn(f):
75 def installmatchfn(f):
76 '''monkey patch the scmutil module with a custom match function.
76 '''monkey patch the scmutil module with a custom match function.
77 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
77 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
78 oldmatch = scmutil.match
78 oldmatch = scmutil.match
79 setattr(f, 'oldmatch', oldmatch)
79 setattr(f, 'oldmatch', oldmatch)
80 scmutil.match = f
80 scmutil.match = f
81 return oldmatch
81 return oldmatch
82
82
83 def restorematchfn():
83 def restorematchfn():
84 '''restores scmutil.match to what it was before installmatchfn
84 '''restores scmutil.match to what it was before installmatchfn
85 was called. no-op if scmutil.match is its original function.
85 was called. no-op if scmutil.match is its original function.
86
86
87 Note that n calls to installmatchfn will require n calls to
87 Note that n calls to installmatchfn will require n calls to
88 restore the original matchfn.'''
88 restore the original matchfn.'''
89 scmutil.match = getattr(scmutil.match, 'oldmatch')
89 scmutil.match = getattr(scmutil.match, 'oldmatch')
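# Pairing sketch (assumed calling convention, inferred from the docstrings
# above rather than shown in this hunk): each installmatchfn() is balanced by
# a restorematchfn(), typically wrapped around the overridden command:
#   oldmatch = installmatchfn(overridematch)
#   try:
#       result = orig(ui, repo, *pats, **opts)
#   finally:
#       restorematchfn()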
90
90
91 def installmatchandpatsfn(f):
91 def installmatchandpatsfn(f):
92 oldmatchandpats = scmutil.matchandpats
92 oldmatchandpats = scmutil.matchandpats
93 setattr(f, 'oldmatchandpats', oldmatchandpats)
93 setattr(f, 'oldmatchandpats', oldmatchandpats)
94 scmutil.matchandpats = f
94 scmutil.matchandpats = f
95 return oldmatchandpats
95 return oldmatchandpats
96
96
97 def restorematchandpatsfn():
97 def restorematchandpatsfn():
98 '''restores scmutil.matchandpats to what it was before
98 '''restores scmutil.matchandpats to what it was before
99 installmatchandpatsfn was called. No-op if scmutil.matchandpats
99 installmatchandpatsfn was called. No-op if scmutil.matchandpats
100 is its original function.
100 is its original function.
101
101
102 Note that n calls to installmatchandpatsfn will require n calls
102 Note that n calls to installmatchandpatsfn will require n calls
103 to restore the original matchfn.'''
103 to restore the original matchfn.'''
104 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
104 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
105 scmutil.matchandpats)
105 scmutil.matchandpats)
106
106
107 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
107 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
108 large = opts.get('large')
108 large = opts.get('large')
109 lfsize = lfutil.getminsize(
109 lfsize = lfutil.getminsize(
110 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
110 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
111
111
112 lfmatcher = None
112 lfmatcher = None
113 if lfutil.islfilesrepo(repo):
113 if lfutil.islfilesrepo(repo):
114 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
114 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
115 if lfpats:
115 if lfpats:
116 lfmatcher = matchmod.match(repo.root, '', list(lfpats))
116 lfmatcher = matchmod.match(repo.root, '', list(lfpats))
117
117
118 lfnames = []
118 lfnames = []
119 m = matcher
119 m = matcher
120
120
121 wctx = repo[None]
121 wctx = repo[None]
122 for f in repo.walk(matchmod.badmatch(m, lambda x, y: None)):
122 for f in repo.walk(matchmod.badmatch(m, lambda x, y: None)):
123 exact = m.exact(f)
123 exact = m.exact(f)
124 lfile = lfutil.standin(f) in wctx
124 lfile = lfutil.standin(f) in wctx
125 nfile = f in wctx
125 nfile = f in wctx
126 exists = lfile or nfile
126 exists = lfile or nfile
127
127
128 # addremove in core gets fancy with the name, add doesn't
128 # addremove in core gets fancy with the name, add doesn't
129 if isaddremove:
129 if isaddremove:
130 name = m.uipath(f)
130 name = m.uipath(f)
131 else:
131 else:
132 name = m.rel(f)
132 name = m.rel(f)
133
133
134 # Don't warn the user when they attempt to add a normal tracked file.
134 # Don't warn the user when they attempt to add a normal tracked file.
135 # The normal add code will do that for us.
135 # The normal add code will do that for us.
136 if exact and exists:
136 if exact and exists:
137 if lfile:
137 if lfile:
138 ui.warn(_('%s already a largefile\n') % name)
138 ui.warn(_('%s already a largefile\n') % name)
139 continue
139 continue
140
140
141 if (exact or not exists) and not lfutil.isstandin(f):
141 if (exact or not exists) and not lfutil.isstandin(f):
142 # In case the file was removed previously, but not committed
142 # In case the file was removed previously, but not committed
143 # (issue3507)
143 # (issue3507)
144 if not repo.wvfs.exists(f):
144 if not repo.wvfs.exists(f):
145 continue
145 continue
146
146
147 abovemin = (lfsize and
147 abovemin = (lfsize and
148 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
148 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
149 if large or abovemin or (lfmatcher and lfmatcher(f)):
149 if large or abovemin or (lfmatcher and lfmatcher(f)):
150 lfnames.append(f)
150 lfnames.append(f)
151 if ui.verbose or not exact:
151 if ui.verbose or not exact:
152 ui.status(_('adding %s as a largefile\n') % name)
152 ui.status(_('adding %s as a largefile\n') % name)
153
153
154 bad = []
154 bad = []
155
155
156 # Need to lock, otherwise there could be a race condition between
156 # Need to lock, otherwise there could be a race condition between
157 # when standins are created and added to the repo.
157 # when standins are created and added to the repo.
158 with repo.wlock():
158 with repo.wlock():
159 if not opts.get('dry_run'):
159 if not opts.get('dry_run'):
160 standins = []
160 standins = []
161 lfdirstate = lfutil.openlfdirstate(ui, repo)
161 lfdirstate = lfutil.openlfdirstate(ui, repo)
162 for f in lfnames:
162 for f in lfnames:
163 standinname = lfutil.standin(f)
163 standinname = lfutil.standin(f)
164 lfutil.writestandin(repo, standinname, hash='',
164 lfutil.writestandin(repo, standinname, hash='',
165 executable=lfutil.getexecutable(repo.wjoin(f)))
165 executable=lfutil.getexecutable(repo.wjoin(f)))
166 standins.append(standinname)
166 standins.append(standinname)
167 if lfdirstate[f] == 'r':
167 if lfdirstate[f] == 'r':
168 lfdirstate.normallookup(f)
168 lfdirstate.normallookup(f)
169 else:
169 else:
170 lfdirstate.add(f)
170 lfdirstate.add(f)
171 lfdirstate.write()
171 lfdirstate.write()
172 bad += [lfutil.splitstandin(f)
172 bad += [lfutil.splitstandin(f)
173 for f in repo[None].add(standins)
173 for f in repo[None].add(standins)
174 if f in m.files()]
174 if f in m.files()]
175
175
176 added = [f for f in lfnames if f not in bad]
176 added = [f for f in lfnames if f not in bad]
177 return added, bad
177 return added, bad
178
178
179 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
179 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
180 after = opts.get('after')
180 after = opts.get('after')
181 m = composelargefilematcher(matcher, repo[None].manifest())
181 m = composelargefilematcher(matcher, repo[None].manifest())
182 try:
182 try:
183 repo.lfstatus = True
183 repo.lfstatus = True
184 s = repo.status(match=m, clean=not isaddremove)
184 s = repo.status(match=m, clean=not isaddremove)
185 finally:
185 finally:
186 repo.lfstatus = False
186 repo.lfstatus = False
187 manifest = repo[None].manifest()
187 manifest = repo[None].manifest()
188 modified, added, deleted, clean = [[f for f in list
188 modified, added, deleted, clean = [[f for f in list
189 if lfutil.standin(f) in manifest]
189 if lfutil.standin(f) in manifest]
190 for list in (s.modified, s.added,
190 for list in (s.modified, s.added,
191 s.deleted, s.clean)]
191 s.deleted, s.clean)]
192
192
193 def warn(files, msg):
193 def warn(files, msg):
194 for f in files:
194 for f in files:
195 ui.warn(msg % m.rel(f))
195 ui.warn(msg % m.rel(f))
196 return int(len(files) > 0)
196 return int(len(files) > 0)
197
197
198 result = 0
198 result = 0
199
199
200 if after:
200 if after:
201 remove = deleted
201 remove = deleted
202 result = warn(modified + added + clean,
202 result = warn(modified + added + clean,
203 _('not removing %s: file still exists\n'))
203 _('not removing %s: file still exists\n'))
204 else:
204 else:
205 remove = deleted + clean
205 remove = deleted + clean
206 result = warn(modified, _('not removing %s: file is modified (use -f'
206 result = warn(modified, _('not removing %s: file is modified (use -f'
207 ' to force removal)\n'))
207 ' to force removal)\n'))
208 result = warn(added, _('not removing %s: file has been marked for add'
208 result = warn(added, _('not removing %s: file has been marked for add'
209 ' (use forget to undo)\n')) or result
209 ' (use forget to undo)\n')) or result
210
210
211 # Need to lock because standin files are deleted then removed from the
211 # Need to lock because standin files are deleted then removed from the
212 # repository and we could race in-between.
212 # repository and we could race in-between.
213 with repo.wlock():
213 with repo.wlock():
214 lfdirstate = lfutil.openlfdirstate(ui, repo)
214 lfdirstate = lfutil.openlfdirstate(ui, repo)
215 for f in sorted(remove):
215 for f in sorted(remove):
216 if ui.verbose or not m.exact(f):
216 if ui.verbose or not m.exact(f):
217 # addremove in core gets fancy with the name, remove doesn't
217 # addremove in core gets fancy with the name, remove doesn't
218 if isaddremove:
218 if isaddremove:
219 name = m.uipath(f)
219 name = m.uipath(f)
220 else:
220 else:
221 name = m.rel(f)
221 name = m.rel(f)
222 ui.status(_('removing %s\n') % name)
222 ui.status(_('removing %s\n') % name)
223
223
224 if not opts.get('dry_run'):
224 if not opts.get('dry_run'):
225 if not after:
225 if not after:
226 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
226 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
227
227
228 if opts.get('dry_run'):
228 if opts.get('dry_run'):
229 return result
229 return result
230
230
231 remove = [lfutil.standin(f) for f in remove]
231 remove = [lfutil.standin(f) for f in remove]
232 # If this is being called by addremove, let the original addremove
232 # If this is being called by addremove, let the original addremove
233 # function handle this.
233 # function handle this.
234 if not isaddremove:
234 if not isaddremove:
235 for f in remove:
235 for f in remove:
236 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
236 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
237 repo[None].forget(remove)
237 repo[None].forget(remove)
238
238
239 for f in remove:
239 for f in remove:
240 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
240 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
241 False)
241 False)
242
242
243 lfdirstate.write()
243 lfdirstate.write()
244
244
245 return result
245 return result
246
246
247 # For overriding mercurial.hgweb.webcommands so that largefiles will
247 # For overriding mercurial.hgweb.webcommands so that largefiles will
248 # appear at their right place in the manifests.
248 # appear at their right place in the manifests.
249 def decodepath(orig, path):
249 def decodepath(orig, path):
250 return lfutil.splitstandin(path) or path
250 return lfutil.splitstandin(path) or path
251
251
252 # -- Wrappers: modify existing commands --------------------------------
252 # -- Wrappers: modify existing commands --------------------------------
253
253
254 def overrideadd(orig, ui, repo, *pats, **opts):
254 def overrideadd(orig, ui, repo, *pats, **opts):
255 if opts.get('normal') and opts.get('large'):
255 if opts.get('normal') and opts.get('large'):
256 raise error.Abort(_('--normal cannot be used with --large'))
256 raise error.Abort(_('--normal cannot be used with --large'))
257 return orig(ui, repo, *pats, **opts)
257 return orig(ui, repo, *pats, **opts)
258
258
259 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
259 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
260 # The --normal flag short circuits this override
260 # The --normal flag short circuits this override
261 if opts.get('normal'):
261 if opts.get('normal'):
262 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
262 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
263
263
264 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
264 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
265 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
265 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
266 ladded)
266 ladded)
267 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
267 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
268
268
269 bad.extend(f for f in lbad)
269 bad.extend(f for f in lbad)
270 return bad
270 return bad
271
271
272 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
272 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
273 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
273 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
274 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
274 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
275 return removelargefiles(ui, repo, False, matcher, after=after,
275 return removelargefiles(ui, repo, False, matcher, after=after,
276 force=force) or result
276 force=force) or result
277
277
278 def overridestatusfn(orig, repo, rev2, **opts):
278 def overridestatusfn(orig, repo, rev2, **opts):
279 try:
279 try:
280 repo._repo.lfstatus = True
280 repo._repo.lfstatus = True
281 return orig(repo, rev2, **opts)
281 return orig(repo, rev2, **opts)
282 finally:
282 finally:
283 repo._repo.lfstatus = False
283 repo._repo.lfstatus = False
284
284
285 def overridestatus(orig, ui, repo, *pats, **opts):
285 def overridestatus(orig, ui, repo, *pats, **opts):
286 try:
286 try:
287 repo.lfstatus = True
287 repo.lfstatus = True
288 return orig(ui, repo, *pats, **opts)
288 return orig(ui, repo, *pats, **opts)
289 finally:
289 finally:
290 repo.lfstatus = False
290 repo.lfstatus = False
291
291
292 def overridedirty(orig, repo, ignoreupdate=False):
292 def overridedirty(orig, repo, ignoreupdate=False):
293 try:
293 try:
294 repo._repo.lfstatus = True
294 repo._repo.lfstatus = True
295 return orig(repo, ignoreupdate)
295 return orig(repo, ignoreupdate)
296 finally:
296 finally:
297 repo._repo.lfstatus = False
297 repo._repo.lfstatus = False
298
298
299 def overridelog(orig, ui, repo, *pats, **opts):
299 def overridelog(orig, ui, repo, *pats, **opts):
300 def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
300 def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
301 default='relpath', badfn=None):
301 default='relpath', badfn=None):
302 """Matcher that merges root directory with .hglf, suitable for log.
302 """Matcher that merges root directory with .hglf, suitable for log.
303 It is still possible to match .hglf directly.
303 It is still possible to match .hglf directly.
304 For any listed files run log on the standin too.
304 For any listed files run log on the standin too.
305 matchfn tries both the given filename and with .hglf stripped.
305 matchfn tries both the given filename and with .hglf stripped.
306 """
306 """
307 if opts is None:
307 if opts is None:
308 opts = {}
308 opts = {}
309 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
309 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
310 badfn=badfn)
310 badfn=badfn)
311 m, p = copy.copy(matchandpats)
311 m, p = copy.copy(matchandpats)
312
312
313 if m.always():
313 if m.always():
314 # We want to match everything anyway, so there's no benefit trying
314 # We want to match everything anyway, so there's no benefit trying
315 # to add standins.
315 # to add standins.
316 return matchandpats
316 return matchandpats
317
317
318 pats = set(p)
318 pats = set(p)
319
319
320 def fixpats(pat, tostandin=lfutil.standin):
320 def fixpats(pat, tostandin=lfutil.standin):
321 if pat.startswith('set:'):
321 if pat.startswith('set:'):
322 return pat
322 return pat
323
323
324 kindpat = matchmod._patsplit(pat, None)
324 kindpat = matchmod._patsplit(pat, None)
325
325
326 if kindpat[0] is not None:
326 if kindpat[0] is not None:
327 return kindpat[0] + ':' + tostandin(kindpat[1])
327 return kindpat[0] + ':' + tostandin(kindpat[1])
328 return tostandin(kindpat[1])
328 return tostandin(kindpat[1])
329
329
330 if m._cwd:
330 if m._cwd:
331 hglf = lfutil.shortname
331 hglf = lfutil.shortname
332 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
332 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
333
333
334 def tostandin(f):
334 def tostandin(f):
335 # The file may already be a standin, so truncate the back
335 # The file may already be a standin, so truncate the back
336 # prefix and test before mangling it. This avoids turning
336 # prefix and test before mangling it. This avoids turning
337 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
337 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
338 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
338 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
339 return f
339 return f
340
340
341 # An absolute path is from outside the repo, so truncate the
341 # An absolute path is from outside the repo, so truncate the
342 # path to the root before building the standin. Otherwise cwd
342 # path to the root before building the standin. Otherwise cwd
343 # is somewhere in the repo, relative to root, and needs to be
343 # is somewhere in the repo, relative to root, and needs to be
344 # prepended before building the standin.
344 # prepended before building the standin.
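# Rough example (hypothetical layout): with the working directory in
# subdirectory 'sub', 'back' is '../' and a pattern 'foo' is rewritten to
# '../.hglf/sub/foo', i.e. 'back' plus the standin of the root-relative path.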
345 if os.path.isabs(m._cwd):
345 if os.path.isabs(m._cwd):
346 f = f[len(back):]
346 f = f[len(back):]
347 else:
347 else:
348 f = m._cwd + '/' + f
348 f = m._cwd + '/' + f
349 return back + lfutil.standin(f)
349 return back + lfutil.standin(f)
350
350
351 pats.update(fixpats(f, tostandin) for f in p)
351 pats.update(fixpats(f, tostandin) for f in p)
352 else:
352 else:
353 def tostandin(f):
353 def tostandin(f):
354 if lfutil.splitstandin(f):
354 if lfutil.splitstandin(f):
355 return f
355 return f
356 return lfutil.standin(f)
356 return lfutil.standin(f)
357 pats.update(fixpats(f, tostandin) for f in p)
357 pats.update(fixpats(f, tostandin) for f in p)
358
358
359 for i in range(0, len(m._files)):
359 for i in range(0, len(m._files)):
360 # Don't add '.hglf' to m.files, since that is already covered by '.'
360 # Don't add '.hglf' to m.files, since that is already covered by '.'
361 if m._files[i] == '.':
361 if m._files[i] == '.':
362 continue
362 continue
363 standin = lfutil.standin(m._files[i])
363 standin = lfutil.standin(m._files[i])
364 # If the "standin" is a directory, append instead of replace to
364 # If the "standin" is a directory, append instead of replace to
365 # support naming a directory on the command line with only
365 # support naming a directory on the command line with only
366 # largefiles. The original directory is kept to support normal
366 # largefiles. The original directory is kept to support normal
367 # files.
367 # files.
368 if standin in repo[ctx.node()]:
368 if standin in repo[ctx.node()]:
369 m._files[i] = standin
369 m._files[i] = standin
370 elif m._files[i] not in repo[ctx.node()] \
370 elif m._files[i] not in repo[ctx.node()] \
371 and repo.wvfs.isdir(standin):
371 and repo.wvfs.isdir(standin):
372 m._files.append(standin)
372 m._files.append(standin)
373
373
374 m._fileroots = set(m._files)
374 m._fileroots = set(m._files)
375 m._always = False
375 m._always = False
376 origmatchfn = m.matchfn
376 origmatchfn = m.matchfn
377 def lfmatchfn(f):
377 def lfmatchfn(f):
378 lf = lfutil.splitstandin(f)
378 lf = lfutil.splitstandin(f)
379 if lf is not None and origmatchfn(lf):
379 if lf is not None and origmatchfn(lf):
380 return True
380 return True
381 r = origmatchfn(f)
381 r = origmatchfn(f)
382 return r
382 return r
383 m.matchfn = lfmatchfn
383 m.matchfn = lfmatchfn
384
384
385 ui.debug('updated patterns: %s\n' % sorted(pats))
385 ui.debug('updated patterns: %s\n' % sorted(pats))
386 return m, pats
386 return m, pats
387
387
388 # For hg log --patch, the match object is used in two different senses:
388 # For hg log --patch, the match object is used in two different senses:
389 # (1) to determine what revisions should be printed out, and
389 # (1) to determine what revisions should be printed out, and
390 # (2) to determine what files to print out diffs for.
390 # (2) to determine what files to print out diffs for.
391 # The magic matchandpats override should be used for case (1) but not for
391 # The magic matchandpats override should be used for case (1) but not for
392 # case (2).
392 # case (2).
393 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
393 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
394 wctx = repo[None]
394 wctx = repo[None]
395 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
395 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
396 return lambda rev: match
396 return lambda rev: match
397
397
398 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
398 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
399 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
399 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
400 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
400 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
401
401
402 try:
402 try:
403 return orig(ui, repo, *pats, **opts)
403 return orig(ui, repo, *pats, **opts)
404 finally:
404 finally:
405 restorematchandpatsfn()
405 restorematchandpatsfn()
406 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
406 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
407
407
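# The extension adds --large, --lfa and --lfc to 'hg verify'; they surface
# here as the 'large', 'lfa' (check all revisions, not just the current one)
# and 'lfc' (also verify largefile contents, not just existence) options.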
408 def overrideverify(orig, ui, repo, *pats, **opts):
408 def overrideverify(orig, ui, repo, *pats, **opts):
409 large = opts.pop('large', False)
409 large = opts.pop('large', False)
410 all = opts.pop('lfa', False)
410 all = opts.pop('lfa', False)
411 contents = opts.pop('lfc', False)
411 contents = opts.pop('lfc', False)
412
412
413 result = orig(ui, repo, *pats, **opts)
413 result = orig(ui, repo, *pats, **opts)
414 if large or all or contents:
414 if large or all or contents:
415 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
415 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
416 return result
416 return result
417
417
418 def overridedebugstate(orig, ui, repo, *pats, **opts):
418 def overridedebugstate(orig, ui, repo, *pats, **opts):
419 large = opts.pop('large', False)
419 large = opts.pop('large', False)
420 if large:
420 if large:
421 class fakerepo(object):
421 class fakerepo(object):
422 dirstate = lfutil.openlfdirstate(ui, repo)
422 dirstate = lfutil.openlfdirstate(ui, repo)
423 orig(ui, fakerepo, *pats, **opts)
423 orig(ui, fakerepo, *pats, **opts)
424 else:
424 else:
425 orig(ui, repo, *pats, **opts)
425 orig(ui, repo, *pats, **opts)
426
426
427 # Before starting the manifest merge, merge.updates will call
427 # Before starting the manifest merge, merge.updates will call
428 # _checkunknownfile to check if there are any files in the merged-in
428 # _checkunknownfile to check if there are any files in the merged-in
429 # changeset that collide with unknown files in the working copy.
429 # changeset that collide with unknown files in the working copy.
430 #
430 #
431 # The largefiles are seen as unknown, so this prevents us from merging
431 # The largefiles are seen as unknown, so this prevents us from merging
432 # in a file 'foo' if we already have a largefile with the same name.
432 # in a file 'foo' if we already have a largefile with the same name.
433 #
433 #
434 # The overridden function filters the unknown files by removing any
434 # The overridden function filters the unknown files by removing any
435 # largefiles. This makes the merge proceed and we can then handle this
435 # largefiles. This makes the merge proceed and we can then handle this
436 # case further in the overridden calculateupdates function below.
436 # case further in the overridden calculateupdates function below.
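# Example of the situation this avoids (hypothetical name): the other side
# adds a normal file 'foo' while we track 'foo' as a largefile. Core
# Mercurial only knows about '.hglf/foo', so the working-copy 'foo' looks
# like an unknown file and the merge would otherwise abort on it.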
437 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
437 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
438 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
438 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
439 return False
439 return False
440 return origfn(repo, wctx, mctx, f, f2)
440 return origfn(repo, wctx, mctx, f, f2)
441
441
442 # The manifest merge handles conflicts on the manifest level. We want
442 # The manifest merge handles conflicts on the manifest level. We want
443 # to handle changes in largefile-ness of files at this level too.
443 # to handle changes in largefile-ness of files at this level too.
444 #
444 #
445 # The strategy is to run the original calculateupdates and then process
445 # The strategy is to run the original calculateupdates and then process
446 # the action list it outputs. There are two cases we need to deal with:
446 # the action list it outputs. There are two cases we need to deal with:
447 #
447 #
448 # 1. Normal file in p1, largefile in p2. Here the largefile is
448 # 1. Normal file in p1, largefile in p2. Here the largefile is
449 # detected via its standin file, which will enter the working copy
449 # detected via its standin file, which will enter the working copy
450 # with a "get" action. It is not "merge" since the standin is all
450 # with a "get" action. It is not "merge" since the standin is all
451 # Mercurial is concerned with at this level -- the link to the
451 # Mercurial is concerned with at this level -- the link to the
452 # existing normal file is not relevant here.
452 # existing normal file is not relevant here.
453 #
453 #
454 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
454 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
455 # since the largefile will be present in the working copy and
455 # since the largefile will be present in the working copy and
456 # different from the normal file in p2. Mercurial therefore
456 # different from the normal file in p2. Mercurial therefore
457 # triggers a merge action.
457 # triggers a merge action.
458 #
458 #
459 # In both cases, we prompt the user and emit new actions to either
459 # In both cases, we prompt the user and emit new actions to either
460 # remove the standin (if the normal file was kept) or to remove the
460 # remove the standin (if the normal file was kept) or to remove the
461 # normal file and get the standin (if the largefile was kept). The
461 # normal file and get the standin (if the largefile was kept). The
462 # default prompt answer is to use the largefile version since it was
462 # default prompt answer is to use the largefile version since it was
463 # presumably changed on purpose.
463 # presumably changed on purpose.
464 #
464 #
465 # Finally, the merge.applyupdates function will then take care of
465 # Finally, the merge.applyupdates function will then take care of
466 # writing the files into the working copy and lfcommands.updatelfiles
466 # writing the files into the working copy and lfcommands.updatelfiles
467 # will update the largefiles.
467 # will update the largefiles.
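# Reminder on the single-letter action codes used below (as defined in
# mercurial/merge.py at this version): 'g' = get the file from the other
# side, 'r' = remove, 'k' = keep the local version, 'a' = mark as added,
# and 'dc' = a deleted/changed conflict that normally prompts the user.
# 'lfmr' is a largefiles-specific action consumed by mergerecordupdates().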
468 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
468 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
469 acceptremote, *args, **kwargs):
469 acceptremote, *args, **kwargs):
470 overwrite = force and not branchmerge
470 overwrite = force and not branchmerge
471 actions, diverge, renamedelete = origfn(
471 actions, diverge, renamedelete = origfn(
472 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
472 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
473
473
474 if overwrite:
474 if overwrite:
475 return actions, diverge, renamedelete
475 return actions, diverge, renamedelete
476
476
477 # Convert to dictionary with filename as key and action as value.
477 # Convert to dictionary with filename as key and action as value.
478 lfiles = set()
478 lfiles = set()
479 for f in actions:
479 for f in actions:
480 splitstandin = lfutil.splitstandin(f)
480 splitstandin = lfutil.splitstandin(f)
481 if splitstandin in p1:
481 if splitstandin in p1:
482 lfiles.add(splitstandin)
482 lfiles.add(splitstandin)
483 elif lfutil.standin(f) in p1:
483 elif lfutil.standin(f) in p1:
484 lfiles.add(f)
484 lfiles.add(f)
485
485
486 for lfile in sorted(lfiles):
486 for lfile in sorted(lfiles):
487 standin = lfutil.standin(lfile)
487 standin = lfutil.standin(lfile)
488 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
488 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
489 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
489 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
490 if sm in ('g', 'dc') and lm != 'r':
490 if sm in ('g', 'dc') and lm != 'r':
491 if sm == 'dc':
491 if sm == 'dc':
492 f1, f2, fa, move, anc = sargs
492 f1, f2, fa, move, anc = sargs
493 sargs = (p2[f2].flags(), False)
493 sargs = (p2[f2].flags(), False)
494 # Case 1: normal file in the working copy, largefile in
494 # Case 1: normal file in the working copy, largefile in
495 # the second parent
495 # the second parent
496 usermsg = _('remote turned local normal file %s into a largefile\n'
496 usermsg = _('remote turned local normal file %s into a largefile\n'
497 'use (l)argefile or keep (n)ormal file?'
497 'use (l)argefile or keep (n)ormal file?'
498 '$$ &Largefile $$ &Normal file') % lfile
498 '$$ &Largefile $$ &Normal file') % lfile
499 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
499 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
500 actions[lfile] = ('r', None, 'replaced by standin')
500 actions[lfile] = ('r', None, 'replaced by standin')
501 actions[standin] = ('g', sargs, 'replaces standin')
501 actions[standin] = ('g', sargs, 'replaces standin')
502 else: # keep local normal file
502 else: # keep local normal file
503 actions[lfile] = ('k', None, 'replaces standin')
503 actions[lfile] = ('k', None, 'replaces standin')
504 if branchmerge:
504 if branchmerge:
505 actions[standin] = ('k', None, 'replaced by non-standin')
505 actions[standin] = ('k', None, 'replaced by non-standin')
506 else:
506 else:
507 actions[standin] = ('r', None, 'replaced by non-standin')
507 actions[standin] = ('r', None, 'replaced by non-standin')
508 elif lm in ('g', 'dc') and sm != 'r':
508 elif lm in ('g', 'dc') and sm != 'r':
509 if lm == 'dc':
509 if lm == 'dc':
510 f1, f2, fa, move, anc = largs
510 f1, f2, fa, move, anc = largs
511 largs = (p2[f2].flags(), False)
511 largs = (p2[f2].flags(), False)
512 # Case 2: largefile in the working copy, normal file in
512 # Case 2: largefile in the working copy, normal file in
513 # the second parent
513 # the second parent
514 usermsg = _('remote turned local largefile %s into a normal file\n'
514 usermsg = _('remote turned local largefile %s into a normal file\n'
515 'keep (l)argefile or use (n)ormal file?'
515 'keep (l)argefile or use (n)ormal file?'
516 '$$ &Largefile $$ &Normal file') % lfile
516 '$$ &Largefile $$ &Normal file') % lfile
517 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
517 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
518 if branchmerge:
518 if branchmerge:
519 # largefile can be restored from standin safely
519 # largefile can be restored from standin safely
520 actions[lfile] = ('k', None, 'replaced by standin')
520 actions[lfile] = ('k', None, 'replaced by standin')
521 actions[standin] = ('k', None, 'replaces standin')
521 actions[standin] = ('k', None, 'replaces standin')
522 else:
522 else:
523 # "lfile" should be marked as "removed" without
523 # "lfile" should be marked as "removed" without
524 # removal of itself
524 # removal of itself
525 actions[lfile] = ('lfmr', None,
525 actions[lfile] = ('lfmr', None,
526 'forget non-standin largefile')
526 'forget non-standin largefile')
527
527
528 # linear-merge should treat this largefile as 're-added'
528 # linear-merge should treat this largefile as 're-added'
529 actions[standin] = ('a', None, 'keep standin')
529 actions[standin] = ('a', None, 'keep standin')
530 else: # pick remote normal file
530 else: # pick remote normal file
531 actions[lfile] = ('g', largs, 'replaces standin')
531 actions[lfile] = ('g', largs, 'replaces standin')
532 actions[standin] = ('r', None, 'replaced by non-standin')
532 actions[standin] = ('r', None, 'replaced by non-standin')
533
533
534 return actions, diverge, renamedelete
534 return actions, diverge, renamedelete
535
535
536 def mergerecordupdates(orig, repo, actions, branchmerge):
536 def mergerecordupdates(orig, repo, actions, branchmerge):
537 if 'lfmr' in actions:
537 if 'lfmr' in actions:
538 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
538 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
539 for lfile, args, msg in actions['lfmr']:
539 for lfile, args, msg in actions['lfmr']:
540 # this should be executed before 'orig', to execute 'remove'
540 # this should be executed before 'orig', to execute 'remove'
541 # before all other actions
541 # before all other actions
542 repo.dirstate.remove(lfile)
542 repo.dirstate.remove(lfile)
543 # make sure lfile doesn't get synclfdirstate'd as normal
543 # make sure lfile doesn't get synclfdirstate'd as normal
544 lfdirstate.add(lfile)
544 lfdirstate.add(lfile)
545 lfdirstate.write()
545 lfdirstate.write()
546
546
547 return orig(repo, actions, branchmerge)
547 return orig(repo, actions, branchmerge)
548
548
549
549
550 # Override filemerge to prompt the user about how they wish to merge
550 # Override filemerge to prompt the user about how they wish to merge
551 # largefiles. This will handle identical edits without prompting the user.
551 # largefiles. This will handle identical edits without prompting the user.
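# Standins only contain the hash of the largefile contents, so comparing the
# ancestor/local/other standin data below effectively compares largefile
# revisions: identical hashes mean the edits were identical and no prompt is
# needed.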
552 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
552 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
553 labels=None):
553 labels=None):
554 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
554 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
555 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
555 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
556 labels=labels)
556 labels=labels)
557
557
558 ahash = fca.data().strip().lower()
558 ahash = fca.data().strip().lower()
559 dhash = fcd.data().strip().lower()
559 dhash = fcd.data().strip().lower()
560 ohash = fco.data().strip().lower()
560 ohash = fco.data().strip().lower()
561 if (ohash != ahash and
561 if (ohash != ahash and
562 ohash != dhash and
562 ohash != dhash and
563 (dhash == ahash or
563 (dhash == ahash or
564 repo.ui.promptchoice(
564 repo.ui.promptchoice(
565 _('largefile %s has a merge conflict\nancestor was %s\n'
565 _('largefile %s has a merge conflict\nancestor was %s\n'
566 'keep (l)ocal %s or\ntake (o)ther %s?'
566 'keep (l)ocal %s or\ntake (o)ther %s?'
567 '$$ &Local $$ &Other') %
567 '$$ &Local $$ &Other') %
568 (lfutil.splitstandin(orig), ahash, dhash, ohash),
568 (lfutil.splitstandin(orig), ahash, dhash, ohash),
569 0) == 1)):
569 0) == 1)):
570 repo.wwrite(fcd.path(), fco.data(), fco.flags())
570 repo.wwrite(fcd.path(), fco.data(), fco.flags())
571 return True, 0, False
571 return True, 0, False
572
572
573 def copiespathcopies(orig, ctx1, ctx2, match=None):
573 def copiespathcopies(orig, ctx1, ctx2, match=None):
574 copies = orig(ctx1, ctx2, match=match)
574 copies = orig(ctx1, ctx2, match=match)
575 updated = {}
575 updated = {}
576
576
577 for k, v in copies.iteritems():
577 for k, v in copies.iteritems():
578 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
578 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
579
579
580 return updated
580 return updated
581
581
582 # Copy first changes the matchers to match standins instead of
582 # Copy first changes the matchers to match standins instead of
583 # largefiles. Then it overrides util.copyfile; in that function it
583 # largefiles. Then it overrides util.copyfile; in that function it
584 # checks whether the destination largefile already exists. It also keeps a
584 # checks whether the destination largefile already exists. It also keeps a
585 # list of copied files so that the largefiles can be copied and the
585 # list of copied files so that the largefiles can be copied and the
586 # dirstate updated.
586 # dirstate updated.
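# Hypothetical example of the two-pass approach: for 'hg cp dir1 dir2' the
# first orig() call copies dir1's normal files; the patterns are then
# rewritten to the .hglf/ standin directory and orig() runs again to copy
# the standins, and the recorded copies finally drive copying or renaming
# the largefiles themselves and updating lfdirstate.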
587 def overridecopy(orig, ui, repo, pats, opts, rename=False):
587 def overridecopy(orig, ui, repo, pats, opts, rename=False):
588 # doesn't remove largefile on rename
588 # doesn't remove largefile on rename
589 if len(pats) < 2:
589 if len(pats) < 2:
590 # this isn't legal, let the original function deal with it
590 # this isn't legal, let the original function deal with it
591 return orig(ui, repo, pats, opts, rename)
591 return orig(ui, repo, pats, opts, rename)
592
592
593 # This could copy both lfiles and normal files in one command,
593 # This could copy both lfiles and normal files in one command,
594 # but we don't want to do that. First replace their matcher to
594 # but we don't want to do that. First replace their matcher to
595 # only match normal files and run it, then replace it to just
595 # only match normal files and run it, then replace it to just
596 # match largefiles and run it again.
596 # match largefiles and run it again.
597 nonormalfiles = False
597 nonormalfiles = False
598 nolfiles = False
598 nolfiles = False
599 installnormalfilesmatchfn(repo[None].manifest())
599 installnormalfilesmatchfn(repo[None].manifest())
600 try:
600 try:
601 result = orig(ui, repo, pats, opts, rename)
601 result = orig(ui, repo, pats, opts, rename)
602 except error.Abort as e:
602 except error.Abort as e:
603 if str(e) != _('no files to copy'):
603 if str(e) != _('no files to copy'):
604 raise e
604 raise e
605 else:
605 else:
606 nonormalfiles = True
606 nonormalfiles = True
607 result = 0
607 result = 0
608 finally:
608 finally:
609 restorematchfn()
609 restorematchfn()
610
610
611 # The first rename can cause our current working directory to be removed.
611 # The first rename can cause our current working directory to be removed.
612 # In that case there is nothing left to copy/rename so just quit.
612 # In that case there is nothing left to copy/rename so just quit.
613 try:
613 try:
614 repo.getcwd()
614 repo.getcwd()
615 except OSError:
615 except OSError:
616 return result
616 return result
617
617
618 def makestandin(relpath):
618 def makestandin(relpath):
619 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
619 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
620 return repo.wvfs.join(lfutil.standin(path))
620 return repo.wvfs.join(lfutil.standin(path))
621
621
622 fullpats = scmutil.expandpats(pats)
622 fullpats = scmutil.expandpats(pats)
623 dest = fullpats[-1]
623 dest = fullpats[-1]
624
624
625 if os.path.isdir(dest):
625 if os.path.isdir(dest):
626 if not os.path.isdir(makestandin(dest)):
626 if not os.path.isdir(makestandin(dest)):
627 os.makedirs(makestandin(dest))
627 os.makedirs(makestandin(dest))
628
628
629 try:
629 try:
630 # When we call orig below, it creates the standins, but we don't add
630 # When we call orig below, it creates the standins, but we don't add
631 # them to the dirstate until later, so lock during that time.
631 # them to the dirstate until later, so lock during that time.
632 wlock = repo.wlock()
632 wlock = repo.wlock()
633
633
634 manifest = repo[None].manifest()
634 manifest = repo[None].manifest()
635 def overridematch(ctx, pats=(), opts=None, globbed=False,
635 def overridematch(ctx, pats=(), opts=None, globbed=False,
636 default='relpath', badfn=None):
636 default='relpath', badfn=None):
637 if opts is None:
637 if opts is None:
638 opts = {}
638 opts = {}
639 newpats = []
639 newpats = []
640 # The patterns were previously mangled to add the standin
640 # The patterns were previously mangled to add the standin
641 # directory; we need to remove that now
641 # directory; we need to remove that now
642 for pat in pats:
642 for pat in pats:
643 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
643 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
644 newpats.append(pat.replace(lfutil.shortname, ''))
644 newpats.append(pat.replace(lfutil.shortname, ''))
645 else:
645 else:
646 newpats.append(pat)
646 newpats.append(pat)
647 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
647 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
648 m = copy.copy(match)
648 m = copy.copy(match)
649 lfile = lambda f: lfutil.standin(f) in manifest
649 lfile = lambda f: lfutil.standin(f) in manifest
650 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
650 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
651 m._fileroots = set(m._files)
651 m._fileroots = set(m._files)
652 origmatchfn = m.matchfn
652 origmatchfn = m.matchfn
653 m.matchfn = lambda f: (lfutil.isstandin(f) and
653 m.matchfn = lambda f: (lfutil.isstandin(f) and
654 (f in manifest) and
654 (f in manifest) and
655 origmatchfn(lfutil.splitstandin(f)) or
655 origmatchfn(lfutil.splitstandin(f)) or
656 None)
656 None)
657 return m
657 return m
658 oldmatch = installmatchfn(overridematch)
658 oldmatch = installmatchfn(overridematch)
659 listpats = []
659 listpats = []
660 for pat in pats:
660 for pat in pats:
661 if matchmod.patkind(pat) is not None:
661 if matchmod.patkind(pat) is not None:
662 listpats.append(pat)
662 listpats.append(pat)
663 else:
663 else:
664 listpats.append(makestandin(pat))
664 listpats.append(makestandin(pat))
665
665
666 try:
666 try:
667 origcopyfile = util.copyfile
667 origcopyfile = util.copyfile
668 copiedfiles = []
668 copiedfiles = []
669 def overridecopyfile(src, dest):
669 def overridecopyfile(src, dest):
670 if (lfutil.shortname in src and
670 if (lfutil.shortname in src and
671 dest.startswith(repo.wjoin(lfutil.shortname))):
671 dest.startswith(repo.wjoin(lfutil.shortname))):
672 destlfile = dest.replace(lfutil.shortname, '')
672 destlfile = dest.replace(lfutil.shortname, '')
673 if not opts['force'] and os.path.exists(destlfile):
673 if not opts['force'] and os.path.exists(destlfile):
674 raise IOError('',
674 raise IOError('',
675 _('destination largefile already exists'))
675 _('destination largefile already exists'))
676 copiedfiles.append((src, dest))
676 copiedfiles.append((src, dest))
677 origcopyfile(src, dest)
677 origcopyfile(src, dest)
678
678
679 util.copyfile = overridecopyfile
679 util.copyfile = overridecopyfile
680 result += orig(ui, repo, listpats, opts, rename)
680 result += orig(ui, repo, listpats, opts, rename)
681 finally:
681 finally:
682 util.copyfile = origcopyfile
682 util.copyfile = origcopyfile
683
683
684 lfdirstate = lfutil.openlfdirstate(ui, repo)
684 lfdirstate = lfutil.openlfdirstate(ui, repo)
685 for (src, dest) in copiedfiles:
685 for (src, dest) in copiedfiles:
686 if (lfutil.shortname in src and
686 if (lfutil.shortname in src and
687 dest.startswith(repo.wjoin(lfutil.shortname))):
687 dest.startswith(repo.wjoin(lfutil.shortname))):
688 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
688 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
689 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
689 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
690 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
690 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
691 if not os.path.isdir(destlfiledir):
691 if not os.path.isdir(destlfiledir):
692 os.makedirs(destlfiledir)
692 os.makedirs(destlfiledir)
693 if rename:
693 if rename:
694 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
694 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
695
695
696 # The file is gone, but this deletes any empty parent
696 # The file is gone, but this deletes any empty parent
697 # directories as a side-effect.
697 # directories as a side-effect.
698 util.unlinkpath(repo.wjoin(srclfile), True)
698 util.unlinkpath(repo.wjoin(srclfile), True)
699 lfdirstate.remove(srclfile)
699 lfdirstate.remove(srclfile)
700 else:
700 else:
701 util.copyfile(repo.wjoin(srclfile),
701 util.copyfile(repo.wjoin(srclfile),
702 repo.wjoin(destlfile))
702 repo.wjoin(destlfile))
703
703
704 lfdirstate.add(destlfile)
704 lfdirstate.add(destlfile)
705 lfdirstate.write()
705 lfdirstate.write()
706 except error.Abort as e:
706 except error.Abort as e:
707 if str(e) != _('no files to copy'):
707 if str(e) != _('no files to copy'):
708 raise e
708 raise e
709 else:
709 else:
710 nolfiles = True
710 nolfiles = True
711 finally:
711 finally:
712 restorematchfn()
712 restorematchfn()
713 wlock.release()
713 wlock.release()
714
714
715 if nolfiles and nonormalfiles:
715 if nolfiles and nonormalfiles:
716 raise error.Abort(_('no files to copy'))
716 raise error.Abort(_('no files to copy'))
717
717
718 return result
718 return result
719
719
720 # When the user calls revert, we have to be careful to not revert any
720 # When the user calls revert, we have to be careful to not revert any
721 # changes to other largefiles accidentally. This means we have to keep
721 # changes to other largefiles accidentally. This means we have to keep
722 # track of the largefiles that are being reverted so we only pull down
722 # track of the largefiles that are being reverted so we only pull down
723 # the necessary largefiles.
723 # the necessary largefiles.
724 #
724 #
725 # Standins are only updated (to match the hash of largefiles) before
725 # Standins are only updated (to match the hash of largefiles) before
726 # commits. Update the standins, then run the original revert, changing
726 # commits. Update the standins, then run the original revert, changing
727 # the matcher to hit standins instead of largefiles. Based on the
727 # the matcher to hit standins instead of largefiles. Based on the
728 # resulting standins update the largefiles.
728 # resulting standins update the largefiles.
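# The sequence below is: refresh the standins from the working-copy
# largefiles, run the original revert against a matcher rewritten to hit
# standins, then compare the standin state before and after and update only
# the largefiles whose standins actually changed.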
729 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
729 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
730 # Because we put the standins in a bad state (by updating them)
730 # Because we put the standins in a bad state (by updating them)
731 # and then return them to a correct state we need to lock to
731 # and then return them to a correct state we need to lock to
732 # prevent others from changing them in their incorrect state.
732 # prevent others from changing them in their incorrect state.
733 with repo.wlock():
733 with repo.wlock():
734 lfdirstate = lfutil.openlfdirstate(ui, repo)
734 lfdirstate = lfutil.openlfdirstate(ui, repo)
735 s = lfutil.lfdirstatestatus(lfdirstate, repo)
735 s = lfutil.lfdirstatestatus(lfdirstate, repo)
736 lfdirstate.write()
736 lfdirstate.write()
737 for lfile in s.modified:
737 for lfile in s.modified:
738 lfutil.updatestandin(repo, lfutil.standin(lfile))
738 lfutil.updatestandin(repo, lfutil.standin(lfile))
739 for lfile in s.deleted:
739 for lfile in s.deleted:
740 if (repo.wvfs.exists(lfutil.standin(lfile))):
740 if (repo.wvfs.exists(lfutil.standin(lfile))):
741 repo.wvfs.unlink(lfutil.standin(lfile))
741 repo.wvfs.unlink(lfutil.standin(lfile))
742
742
743 oldstandins = lfutil.getstandinsstate(repo)
743 oldstandins = lfutil.getstandinsstate(repo)
744
744
745 def overridematch(mctx, pats=(), opts=None, globbed=False,
745 def overridematch(mctx, pats=(), opts=None, globbed=False,
746 default='relpath', badfn=None):
746 default='relpath', badfn=None):
747 if opts is None:
747 if opts is None:
748 opts = {}
748 opts = {}
749 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
749 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
750 m = copy.copy(match)
750 m = copy.copy(match)
751
751
752 # revert supports recursing into subrepos, and though largefiles
752 # revert supports recursing into subrepos, and though largefiles
753 # currently doesn't work correctly in that case, this match is
753 # currently doesn't work correctly in that case, this match is
754 # called, so the lfdirstate above may not be the correct one for
754 # called, so the lfdirstate above may not be the correct one for
755 # this invocation of match.
755 # this invocation of match.
756 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
756 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
757 False)
757 False)
758
758
759 def tostandin(f):
759 def tostandin(f):
760 standin = lfutil.standin(f)
760 standin = lfutil.standin(f)
761 if standin in ctx or standin in mctx:
761 if standin in ctx or standin in mctx:
762 return standin
762 return standin
763 elif standin in repo[None] or lfdirstate[f] == 'r':
763 elif standin in repo[None] or lfdirstate[f] == 'r':
764 return None
764 return None
765 return f
765 return f
766 m._files = [tostandin(f) for f in m._files]
766 m._files = [tostandin(f) for f in m._files]
767 m._files = [f for f in m._files if f is not None]
767 m._files = [f for f in m._files if f is not None]
768 m._fileroots = set(m._files)
768 m._fileroots = set(m._files)
769 origmatchfn = m.matchfn
769 origmatchfn = m.matchfn
770 def matchfn(f):
770 def matchfn(f):
771 if lfutil.isstandin(f):
771 if lfutil.isstandin(f):
772 return (origmatchfn(lfutil.splitstandin(f)) and
772 return (origmatchfn(lfutil.splitstandin(f)) and
773 (f in ctx or f in mctx))
773 (f in ctx or f in mctx))
774 return origmatchfn(f)
774 return origmatchfn(f)
775 m.matchfn = matchfn
775 m.matchfn = matchfn
776 return m
776 return m
777 oldmatch = installmatchfn(overridematch)
777 oldmatch = installmatchfn(overridematch)
778 try:
778 try:
779 orig(ui, repo, ctx, parents, *pats, **opts)
779 orig(ui, repo, ctx, parents, *pats, **opts)
780 finally:
780 finally:
781 restorematchfn()
781 restorematchfn()
782
782
783 newstandins = lfutil.getstandinsstate(repo)
783 newstandins = lfutil.getstandinsstate(repo)
784 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
784 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
785 # lfdirstate should be 'normallookup'-ed for updated files,
785 # lfdirstate should be 'normallookup'-ed for updated files,
786 # because reverting doesn't touch the dirstate for 'normal' files
786 # because reverting doesn't touch the dirstate for 'normal' files
787 # when the target revision is explicitly specified: in that case,
787 # when the target revision is explicitly specified: in that case,
788 # an 'n' state and a valid timestamp in the dirstate don't guarantee
788 # an 'n' state and a valid timestamp in the dirstate don't guarantee
789 # that the target (standin) file is clean.
789 # that the target (standin) file is clean.
790 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
790 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
791 normallookup=True)
791 normallookup=True)
792
792
793 # After pulling changesets, we need to take some extra care to fetch
793 # After pulling changesets, we need to take some extra care to fetch
794 # the largefiles that were added or modified in the pulled revisions.
794 # the largefiles that were added or modified in the pulled revisions.
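# With --all-largefiles the implicit revset 'pulled()' (defined further
# below) is appended to --lfrev, so the largefiles referenced by every newly
# pulled changeset are cached locally right away.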
795 def overridepull(orig, ui, repo, source=None, **opts):
795 def overridepull(orig, ui, repo, source=None, **opts):
796 revsprepull = len(repo)
796 revsprepull = len(repo)
797 if not source:
797 if not source:
798 source = 'default'
798 source = 'default'
799 repo.lfpullsource = source
799 repo.lfpullsource = source
800 result = orig(ui, repo, source, **opts)
800 result = orig(ui, repo, source, **opts)
801 revspostpull = len(repo)
801 revspostpull = len(repo)
802 lfrevs = opts.get('lfrev', [])
802 lfrevs = opts.get('lfrev', [])
803 if opts.get('all_largefiles'):
803 if opts.get('all_largefiles'):
804 lfrevs.append('pulled()')
804 lfrevs.append('pulled()')
805 if lfrevs and revspostpull > revsprepull:
805 if lfrevs and revspostpull > revsprepull:
806 numcached = 0
806 numcached = 0
807 repo.firstpulled = revsprepull # for pulled() revset expression
807 repo.firstpulled = revsprepull # for pulled() revset expression
808 try:
808 try:
809 for rev in scmutil.revrange(repo, lfrevs):
809 for rev in scmutil.revrange(repo, lfrevs):
810 ui.note(_('pulling largefiles for revision %s\n') % rev)
810 ui.note(_('pulling largefiles for revision %s\n') % rev)
811 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
811 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
812 numcached += len(cached)
812 numcached += len(cached)
813 finally:
813 finally:
814 del repo.firstpulled
814 del repo.firstpulled
815 ui.status(_("%d largefiles cached\n") % numcached)
815 ui.status(_("%d largefiles cached\n") % numcached)
816 return result
816 return result
817
817
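# overridepush() and exchangepushoperation() together thread the --lfrev
# selection through to pushop.lfrevs; the largefiles push code elsewhere in
# the extension reads that attribute to decide which revisions' largefiles
# must be uploaded before the changesets are pushed.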
818 def overridepush(orig, ui, repo, *args, **kwargs):
818 def overridepush(orig, ui, repo, *args, **kwargs):
819 """Override push command and store --lfrev parameters in opargs"""
819 """Override push command and store --lfrev parameters in opargs"""
820 lfrevs = kwargs.pop('lfrev', None)
820 lfrevs = kwargs.pop('lfrev', None)
821 if lfrevs:
821 if lfrevs:
822 opargs = kwargs.setdefault('opargs', {})
822 opargs = kwargs.setdefault('opargs', {})
823 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
823 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
824 return orig(ui, repo, *args, **kwargs)
824 return orig(ui, repo, *args, **kwargs)
825
825
826 def exchangepushoperation(orig, *args, **kwargs):
826 def exchangepushoperation(orig, *args, **kwargs):
827 """Override pushoperation constructor and store lfrevs parameter"""
827 """Override pushoperation constructor and store lfrevs parameter"""
828 lfrevs = kwargs.pop('lfrevs', None)
828 lfrevs = kwargs.pop('lfrevs', None)
829 pushop = orig(*args, **kwargs)
829 pushop = orig(*args, **kwargs)
830 pushop.lfrevs = lfrevs
830 pushop.lfrevs = lfrevs
831 return pushop
831 return pushop
832
832
833 revsetpredicate = registrar.revsetpredicate()
833 revsetpredicate = registrar.revsetpredicate()
834
834
835 @revsetpredicate('pulled()')
835 @revsetpredicate('pulled()')
836 def pulledrevsetsymbol(repo, subset, x):
836 def pulledrevsetsymbol(repo, subset, x):
837 """Changesets that just has been pulled.
837 """Changesets that just has been pulled.
838
838
839 Only available with largefiles, and only in pull --lfrev expressions.
839 Only available with largefiles, and only in pull --lfrev expressions.
840
840
841 .. container:: verbose
841 .. container:: verbose
842
842
843 Some examples:
843 Some examples:
844
844
845 - pull largefiles for all new changesets::
845 - pull largefiles for all new changesets::
846
846
847 hg pull --lfrev "pulled()"
847 hg pull --lfrev "pulled()"
848
848
849 - pull largefiles for all new branch heads::
849 - pull largefiles for all new branch heads::
850
850
851 hg pull --lfrev "head(pulled()) and not closed()"
851 hg pull --lfrev "head(pulled()) and not closed()"
852
852
853 """
853 """
854
854
855 try:
855 try:
856 firstpulled = repo.firstpulled
856 firstpulled = repo.firstpulled
857 except AttributeError:
857 except AttributeError:
858 raise error.Abort(_("pulled() only available in --lfrev"))
858 raise error.Abort(_("pulled() only available in --lfrev"))
859 return revset.baseset([r for r in subset if r >= firstpulled])
859 return revset.baseset([r for r in subset if r >= firstpulled])
860
860
861 def overrideclone(orig, ui, source, dest=None, **opts):
861 def overrideclone(orig, ui, source, dest=None, **opts):
862 d = dest
862 d = dest
863 if d is None:
863 if d is None:
864 d = hg.defaultdest(source)
864 d = hg.defaultdest(source)
865 if opts.get('all_largefiles') and not hg.islocal(d):
865 if opts.get('all_largefiles') and not hg.islocal(d):
866 raise error.Abort(_(
866 raise error.Abort(_(
867 '--all-largefiles is incompatible with non-local destination %s') %
867 '--all-largefiles is incompatible with non-local destination %s') %
868 d)
868 d)
869
869
870 return orig(ui, source, dest, **opts)
870 return orig(ui, source, dest, **opts)
871
871
872 def hgclone(orig, ui, opts, *args, **kwargs):
872 def hgclone(orig, ui, opts, *args, **kwargs):
873 result = orig(ui, opts, *args, **kwargs)
873 result = orig(ui, opts, *args, **kwargs)
874
874
875 if result is not None:
875 if result is not None:
876 sourcerepo, destrepo = result
876 sourcerepo, destrepo = result
877 repo = destrepo.local()
877 repo = destrepo.local()
878
878
879 # When cloning to a remote repo (like through SSH), no repo is available
879 # When cloning to a remote repo (like through SSH), no repo is available
880 # from the peer. Therefore the largefiles can't be downloaded and the
880 # from the peer. Therefore the largefiles can't be downloaded and the
881 # hgrc can't be updated.
881 # hgrc can't be updated.
882 if not repo:
882 if not repo:
883 return result
883 return result
884
884
885 # If largefiles is required for this repo, permanently enable it locally
885 # If largefiles is required for this repo, permanently enable it locally
886 if 'largefiles' in repo.requirements:
886 if 'largefiles' in repo.requirements:
887 fp = repo.vfs('hgrc', 'a', text=True)
887 fp = repo.vfs('hgrc', 'a', text=True)
888 try:
888 try:
889 fp.write('\n[extensions]\nlargefiles=\n')
889 fp.write('\n[extensions]\nlargefiles=\n')
890 finally:
890 finally:
891 fp.close()
891 fp.close()
892
892
893 # Caching is implicitly limited to 'rev' option, since the dest repo was
893 # Caching is implicitly limited to 'rev' option, since the dest repo was
894 # truncated at that point. The user may expect a download count with
894 # truncated at that point. The user may expect a download count with
895 # this option, so attempt the download whether or not this is a largefile repo.
895 # this option, so attempt the download whether or not this is a largefile repo.
896 if opts.get('all_largefiles'):
896 if opts.get('all_largefiles'):
897 success, missing = lfcommands.downloadlfiles(ui, repo, None)
897 success, missing = lfcommands.downloadlfiles(ui, repo, None)
898
898
899 if missing != 0:
899 if missing != 0:
900 return None
900 return None
901
901
902 return result
902 return result
903
903
904 def overriderebase(orig, ui, repo, **opts):
904 def overriderebase(orig, ui, repo, **opts):
905 if not util.safehasattr(repo, '_largefilesenabled'):
905 if not util.safehasattr(repo, '_largefilesenabled'):
906 return orig(ui, repo, **opts)
906 return orig(ui, repo, **opts)
907
907
908 resuming = opts.get('continue')
908 resuming = opts.get('continue')
909 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
909 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
910 repo._lfstatuswriters.append(lambda *msg, **opts: None)
910 repo._lfstatuswriters.append(lambda *msg, **opts: None)
911 try:
911 try:
912 return orig(ui, repo, **opts)
912 return orig(ui, repo, **opts)
913 finally:
913 finally:
914 repo._lfstatuswriters.pop()
914 repo._lfstatuswriters.pop()
915 repo._lfcommithooks.pop()
915 repo._lfcommithooks.pop()
916
916
917 def overridearchivecmd(orig, ui, repo, dest, **opts):
917 def overridearchivecmd(orig, ui, repo, dest, **opts):
918 repo.unfiltered().lfstatus = True
918 repo.unfiltered().lfstatus = True
919
919
920 try:
920 try:
921 return orig(ui, repo.unfiltered(), dest, **opts)
921 return orig(ui, repo.unfiltered(), dest, **opts)
922 finally:
922 finally:
923 repo.unfiltered().lfstatus = False
923 repo.unfiltered().lfstatus = False
924
924
925 def hgwebarchive(orig, web, req, tmpl):
925 def hgwebarchive(orig, web, req, tmpl):
926 web.repo.lfstatus = True
926 web.repo.lfstatus = True
927
927
928 try:
928 try:
929 return orig(web, req, tmpl)
929 return orig(web, req, tmpl)
930 finally:
930 finally:
931 web.repo.lfstatus = False
931 web.repo.lfstatus = False
932
932
933 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
933 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
934 prefix='', mtime=None, subrepos=None):
934 prefix='', mtime=None, subrepos=None):
935 # For some reason setting repo.lfstatus in hgwebarchive only changes the
935 # For some reason setting repo.lfstatus in hgwebarchive only changes the
936 # unfiltered repo's attr, so check that as well.
936 # unfiltered repo's attr, so check that as well.
937 if not repo.lfstatus and not repo.unfiltered().lfstatus:
937 if not repo.lfstatus and not repo.unfiltered().lfstatus:
938 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
938 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
939 subrepos)
939 subrepos)
940
940
941 # No need to lock because we are only reading history and
941 # No need to lock because we are only reading history and
942 # largefile caches, neither of which are modified.
942 # largefile caches, neither of which are modified.
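# cachelfiles() below makes sure every largefile referenced by the archived
# revision is present locally, so the per-file loop further down can resolve
# each standin to the real largefile contents via lfutil.findfile().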
943 if node is not None:
943 if node is not None:
944 lfcommands.cachelfiles(repo.ui, repo, node)
944 lfcommands.cachelfiles(repo.ui, repo, node)
945
945
946 if kind not in archival.archivers:
946 if kind not in archival.archivers:
947 raise error.Abort(_("unknown archive type '%s'") % kind)
947 raise error.Abort(_("unknown archive type '%s'") % kind)
948
948
949 ctx = repo[node]
949 ctx = repo[node]
950
950
951 if kind == 'files':
951 if kind == 'files':
952 if prefix:
952 if prefix:
953 raise error.Abort(
953 raise error.Abort(
954 _('cannot give prefix when archiving to files'))
954 _('cannot give prefix when archiving to files'))
955 else:
955 else:
956 prefix = archival.tidyprefix(dest, kind, prefix)
956 prefix = archival.tidyprefix(dest, kind, prefix)
957
957
958 def write(name, mode, islink, getdata):
958 def write(name, mode, islink, getdata):
959 if matchfn and not matchfn(name):
959 if matchfn and not matchfn(name):
960 return
960 return
961 data = getdata()
961 data = getdata()
962 if decode:
962 if decode:
963 data = repo.wwritedata(name, data)
963 data = repo.wwritedata(name, data)
964 archiver.addfile(prefix + name, mode, islink, data)
964 archiver.addfile(prefix + name, mode, islink, data)
965
965
966 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
966 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
967
967
968 if repo.ui.configbool("ui", "archivemeta", True):
968 if repo.ui.configbool("ui", "archivemeta", True):
969 write('.hg_archival.txt', 0o644, False,
969 write('.hg_archival.txt', 0o644, False,
970 lambda: archival.buildmetadata(ctx))
970 lambda: archival.buildmetadata(ctx))
971
971
972 for f in ctx:
972 for f in ctx:
973 ff = ctx.flags(f)
973 ff = ctx.flags(f)
974 getdata = ctx[f].data
974 getdata = ctx[f].data
975 if lfutil.isstandin(f):
975 if lfutil.isstandin(f):
976 if node is not None:
976 if node is not None:
977 path = lfutil.findfile(repo, getdata().strip())
977 path = lfutil.findfile(repo, getdata().strip())
978
978
979 if path is None:
979 if path is None:
980 raise error.Abort(
980 raise error.Abort(
981 _('largefile %s not found in repo store or system cache')
981 _('largefile %s not found in repo store or system cache')
982 % lfutil.splitstandin(f))
982 % lfutil.splitstandin(f))
983 else:
983 else:
984 path = lfutil.splitstandin(f)
984 path = lfutil.splitstandin(f)
985
985
986 f = lfutil.splitstandin(f)
986 f = lfutil.splitstandin(f)
987
987
988 getdata = lambda: util.readfile(path)
988 getdata = lambda: util.readfile(path)
989 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
989 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
990
990
991 if subrepos:
991 if subrepos:
992 for subpath in sorted(ctx.substate):
992 for subpath in sorted(ctx.substate):
993 sub = ctx.workingsub(subpath)
993 sub = ctx.workingsub(subpath)
994 submatch = matchmod.subdirmatcher(subpath, matchfn)
994 submatch = matchmod.subdirmatcher(subpath, matchfn)
995 sub._repo.lfstatus = True
995 sub._repo.lfstatus = True
996 sub.archive(archiver, prefix, submatch)
996 sub.archive(archiver, prefix, submatch)
997
997
998 archiver.done()
998 archiver.done()
999
999
1000 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
1000 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
1001 if not repo._repo.lfstatus:
1001 if not repo._repo.lfstatus:
1002 return orig(repo, archiver, prefix, match)
1002 return orig(repo, archiver, prefix, match)
1003
1003
1004 repo._get(repo._state + ('hg',))
1004 repo._get(repo._state + ('hg',))
1005 rev = repo._state[1]
1005 rev = repo._state[1]
1006 ctx = repo._repo[rev]
1006 ctx = repo._repo[rev]
1007
1007
1008 if ctx.node() is not None:
1008 if ctx.node() is not None:
1009 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1009 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1010
1010
1011 def write(name, mode, islink, getdata):
1011 def write(name, mode, islink, getdata):
1012 # At this point, the standin has been replaced with the largefile name,
1012 # At this point, the standin has been replaced with the largefile name,
1013 # so the normal matcher works here without the lfutil variants.
1013 # so the normal matcher works here without the lfutil variants.
1014 if match and not match(f):
1014 if match and not match(f):
1015 return
1015 return
1016 data = getdata()
1016 data = getdata()
1017
1017
1018 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1018 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1019
1019
1020 for f in ctx:
1020 for f in ctx:
1021 ff = ctx.flags(f)
1021 ff = ctx.flags(f)
1022 getdata = ctx[f].data
1022 getdata = ctx[f].data
1023 if lfutil.isstandin(f):
1023 if lfutil.isstandin(f):
1024 if ctx.node() is not None:
1024 if ctx.node() is not None:
1025 path = lfutil.findfile(repo._repo, getdata().strip())
1025 path = lfutil.findfile(repo._repo, getdata().strip())
1026
1026
1027 if path is None:
1027 if path is None:
1028 raise error.Abort(
1028 raise error.Abort(
1029 _('largefile %s not found in repo store or system cache')
1029 _('largefile %s not found in repo store or system cache')
1030 % lfutil.splitstandin(f))
1030 % lfutil.splitstandin(f))
1031 else:
1031 else:
1032 path = lfutil.splitstandin(f)
1032 path = lfutil.splitstandin(f)
1033
1033
1034 f = lfutil.splitstandin(f)
1034 f = lfutil.splitstandin(f)
1035
1035
1036 getdata = lambda: util.readfile(os.path.join(prefix, path))
1036 getdata = lambda: util.readfile(os.path.join(prefix, path))
1037
1037
1038 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1038 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1039
1039
1040 for subpath in sorted(ctx.substate):
1040 for subpath in sorted(ctx.substate):
1041 sub = ctx.workingsub(subpath)
1041 sub = ctx.workingsub(subpath)
1042 submatch = matchmod.subdirmatcher(subpath, match)
1042 submatch = matchmod.subdirmatcher(subpath, match)
1043 sub._repo.lfstatus = True
1043 sub._repo.lfstatus = True
1044 sub.archive(archiver, prefix + repo._path + '/', submatch)
1044 sub.archive(archiver, prefix + repo._path + '/', submatch)
1045
1045
1046 # If a largefile is modified, the change is not reflected in its
1046 # If a largefile is modified, the change is not reflected in its
1047 # standin until a commit. cmdutil.bailifchanged() raises an exception
1047 # standin until a commit. cmdutil.bailifchanged() raises an exception
1048 # if the repo has uncommitted changes. Wrap it to also check if
1048 # if the repo has uncommitted changes. Wrap it to also check if
1049 # largefiles were changed. This is used by bisect, backout and fetch.
1049 # largefiles were changed. This is used by bisect, backout and fetch.
1050 def overridebailifchanged(orig, repo, *args, **kwargs):
1050 def overridebailifchanged(orig, repo, *args, **kwargs):
1051 orig(repo, *args, **kwargs)
1051 orig(repo, *args, **kwargs)
1052 repo.lfstatus = True
1052 repo.lfstatus = True
1053 s = repo.status()
1053 s = repo.status()
1054 repo.lfstatus = False
1054 repo.lfstatus = False
1055 if s.modified or s.added or s.removed or s.deleted:
1055 if s.modified or s.added or s.removed or s.deleted:
1056 raise error.Abort(_('uncommitted changes'))
1056 raise error.Abort(_('uncommitted changes'))
1057
1057
1058 def postcommitstatus(orig, repo, *args, **kwargs):
1058 def postcommitstatus(orig, repo, *args, **kwargs):
1059 repo.lfstatus = True
1059 repo.lfstatus = True
1060 try:
1060 try:
1061 return orig(repo, *args, **kwargs)
1061 return orig(repo, *args, **kwargs)
1062 finally:
1062 finally:
1063 repo.lfstatus = False
1063 repo.lfstatus = False
1064
1064
1065 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1065 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1066 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1066 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1067 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1067 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1068 m = composelargefilematcher(match, repo[None].manifest())
1068 m = composelargefilematcher(match, repo[None].manifest())
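# 'normalmatcher' dealt with the plain files in the orig() call above; 'm'
# matches only files that have standins, so everything from here on handles
# the largefiles side of the forget.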
1069
1069
1070 try:
1070 try:
1071 repo.lfstatus = True
1071 repo.lfstatus = True
1072 s = repo.status(match=m, clean=True)
1072 s = repo.status(match=m, clean=True)
1073 finally:
1073 finally:
1074 repo.lfstatus = False
1074 repo.lfstatus = False
1075 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1075 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1076 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1076 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1077
1077
1078 for f in forget:
1078 for f in forget:
1079 if lfutil.standin(f) not in repo.dirstate and not \
1079 if lfutil.standin(f) not in repo.dirstate and not \
1080 repo.wvfs.isdir(lfutil.standin(f)):
1080 repo.wvfs.isdir(lfutil.standin(f)):
1081 ui.warn(_('not removing %s: file is already untracked\n')
1081 ui.warn(_('not removing %s: file is already untracked\n')
1082 % m.rel(f))
1082 % m.rel(f))
1083 bad.append(f)
1083 bad.append(f)
1084
1084
1085 for f in forget:
1085 for f in forget:
1086 if ui.verbose or not m.exact(f):
1086 if ui.verbose or not m.exact(f):
1087 ui.status(_('removing %s\n') % m.rel(f))
1087 ui.status(_('removing %s\n') % m.rel(f))
1088
1088
1089 # Need to lock because standin files are deleted then removed from the
1089 # Need to lock because standin files are deleted then removed from the
1090 # repository and we could race in-between.
1090 # repository and we could race in-between.
1091 with repo.wlock():
1091 with repo.wlock():
1092 lfdirstate = lfutil.openlfdirstate(ui, repo)
1092 lfdirstate = lfutil.openlfdirstate(ui, repo)
1093 for f in forget:
1093 for f in forget:
1094 if lfdirstate[f] == 'a':
1094 if lfdirstate[f] == 'a':
1095 lfdirstate.drop(f)
1095 lfdirstate.drop(f)
1096 else:
1096 else:
1097 lfdirstate.remove(f)
1097 lfdirstate.remove(f)
1098 lfdirstate.write()
1098 lfdirstate.write()
1099 standins = [lfutil.standin(f) for f in forget]
1099 standins = [lfutil.standin(f) for f in forget]
1100 for f in standins:
1100 for f in standins:
1101 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1101 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1102 rejected = repo[None].forget(standins)
1102 rejected = repo[None].forget(standins)
1103
1103
1104 bad.extend(f for f in rejected if f in m.files())
1104 bad.extend(f for f in rejected if f in m.files())
1105 forgot.extend(f for f in forget if f not in rejected)
1105 forgot.extend(f for f in forget if f not in rejected)
1106 return bad, forgot
1106 return bad, forgot
1107
1107
1108 def _getoutgoings(repo, other, missing, addfunc):
1108 def _getoutgoings(repo, other, missing, addfunc):
1109 """get pairs of filename and largefile hash in outgoing revisions
1109 """get pairs of filename and largefile hash in outgoing revisions
1110 in 'missing'.
1110 in 'missing'.
1111
1111
1112 largefiles already existing on the 'other' repository are ignored.
1112 largefiles already existing on the 'other' repository are ignored.
1113
1113
1114 'addfunc' is invoked once for each unique pair of filename and
1114 'addfunc' is invoked once for each unique pair of filename and
1115 largefile hash value.
1115 largefile hash value.
1116 """
1116 """
1117 knowns = set()
1117 knowns = set()
1118 lfhashes = set()
1118 lfhashes = set()
1119 def dedup(fn, lfhash):
1119 def dedup(fn, lfhash):
1120 k = (fn, lfhash)
1120 k = (fn, lfhash)
1121 if k not in knowns:
1121 if k not in knowns:
1122 knowns.add(k)
1122 knowns.add(k)
1123 lfhashes.add(lfhash)
1123 lfhashes.add(lfhash)
1124 lfutil.getlfilestoupload(repo, missing, dedup)
1124 lfutil.getlfilestoupload(repo, missing, dedup)
1125 if lfhashes:
1125 if lfhashes:
1126 lfexists = storefactory._openstore(repo, other).exists(lfhashes)
1126 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1127 for fn, lfhash in knowns:
1127 for fn, lfhash in knowns:
1128 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1128 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1129 addfunc(fn, lfhash)
1129 addfunc(fn, lfhash)
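# Typical use, slightly simplified from outgoinghook()/summaryremotehook()
# below:
#   _getoutgoings(repo, other, outgoing.missing,
#                 lambda fn, lfhash: toupload.add(fn))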
1130
1130
1131 def outgoinghook(ui, repo, other, opts, missing):
1131 def outgoinghook(ui, repo, other, opts, missing):
1132 if opts.pop('large', None):
1132 if opts.pop('large', None):
1133 lfhashes = set()
1133 lfhashes = set()
1134 if ui.debugflag:
1134 if ui.debugflag:
1135 toupload = {}
1135 toupload = {}
1136 def addfunc(fn, lfhash):
1136 def addfunc(fn, lfhash):
1137 if fn not in toupload:
1137 if fn not in toupload:
1138 toupload[fn] = []
1138 toupload[fn] = []
1139 toupload[fn].append(lfhash)
1139 toupload[fn].append(lfhash)
1140 lfhashes.add(lfhash)
1140 lfhashes.add(lfhash)
1141 def showhashes(fn):
1141 def showhashes(fn):
1142 for lfhash in sorted(toupload[fn]):
1142 for lfhash in sorted(toupload[fn]):
1143 ui.debug(' %s\n' % (lfhash))
1143 ui.debug(' %s\n' % (lfhash))
1144 else:
1144 else:
1145 toupload = set()
1145 toupload = set()
1146 def addfunc(fn, lfhash):
1146 def addfunc(fn, lfhash):
1147 toupload.add(fn)
1147 toupload.add(fn)
1148 lfhashes.add(lfhash)
1148 lfhashes.add(lfhash)
1149 def showhashes(fn):
1149 def showhashes(fn):
1150 pass
1150 pass
1151 _getoutgoings(repo, other, missing, addfunc)
1151 _getoutgoings(repo, other, missing, addfunc)
1152
1152
1153 if not toupload:
1153 if not toupload:
1154 ui.status(_('largefiles: no files to upload\n'))
1154 ui.status(_('largefiles: no files to upload\n'))
1155 else:
1155 else:
1156 ui.status(_('largefiles to upload (%d entities):\n')
1156 ui.status(_('largefiles to upload (%d entities):\n')
1157 % (len(lfhashes)))
1157 % (len(lfhashes)))
1158 for file in sorted(toupload):
1158 for file in sorted(toupload):
1159 ui.status(lfutil.splitstandin(file) + '\n')
1159 ui.status(lfutil.splitstandin(file) + '\n')
1160 showhashes(file)
1160 showhashes(file)
1161 ui.status('\n')
1161 ui.status('\n')
1162
1162
1163 def summaryremotehook(ui, repo, opts, changes):
1163 def summaryremotehook(ui, repo, opts, changes):
1164 largeopt = opts.get('large', False)
1164 largeopt = opts.get('large', False)
1165 if changes is None:
1165 if changes is None:
1166 if largeopt:
1166 if largeopt:
1167 return (False, True) # only outgoing check is needed
1167 return (False, True) # only outgoing check is needed
1168 else:
1168 else:
1169 return (False, False)
1169 return (False, False)
1170 elif largeopt:
1170 elif largeopt:
1171 url, branch, peer, outgoing = changes[1]
1171 url, branch, peer, outgoing = changes[1]
1172 if peer is None:
1172 if peer is None:
1173 # i18n: column positioning for "hg summary"
1173 # i18n: column positioning for "hg summary"
1174 ui.status(_('largefiles: (no remote repo)\n'))
1174 ui.status(_('largefiles: (no remote repo)\n'))
1175 return
1175 return
1176
1176
1177 toupload = set()
1177 toupload = set()
1178 lfhashes = set()
1178 lfhashes = set()
1179 def addfunc(fn, lfhash):
1179 def addfunc(fn, lfhash):
1180 toupload.add(fn)
1180 toupload.add(fn)
1181 lfhashes.add(lfhash)
1181 lfhashes.add(lfhash)
1182 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1182 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1183
1183
1184 if not toupload:
1184 if not toupload:
1185 # i18n: column positioning for "hg summary"
1185 # i18n: column positioning for "hg summary"
1186 ui.status(_('largefiles: (no files to upload)\n'))
1186 ui.status(_('largefiles: (no files to upload)\n'))
1187 else:
1187 else:
1188 # i18n: column positioning for "hg summary"
1188 # i18n: column positioning for "hg summary"
1189 ui.status(_('largefiles: %d entities for %d files to upload\n')
1189 ui.status(_('largefiles: %d entities for %d files to upload\n')
1190 % (len(lfhashes), len(toupload)))
1190 % (len(lfhashes), len(toupload)))
1191
1191
1192 def overridesummary(orig, ui, repo, *pats, **opts):
1193 try:
1194 repo.lfstatus = True
1195 orig(ui, repo, *pats, **opts)
1196 finally:
1197 repo.lfstatus = False
1198
1199 def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
1200 similarity=None):
1201 if opts is None:
1202 opts = {}
1203 if not lfutil.islfilesrepo(repo):
1204 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1205 # Get the list of missing largefiles so we can remove them
1206 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1207 unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()), [],
1208 False, False, False)
1209
1210 # Call into the normal remove code, but the removing of the standin, we want
1211 # to have handled by original addremove. Monkey patching here makes sure
1212 # we don't remove the standin in the largefiles code, preventing a very
1213 # confused state later.
1214 if s.deleted:
1215 m = copy.copy(matcher)
1216
1217 # The m._files and m._map attributes are not changed to the deleted list
1218 # because that affects the m.exact() test, which in turn governs whether
1219 # or not the file name is printed, and how. Simply limit the original
1220 # matches to those in the deleted status list.
1221 matchfn = m.matchfn
1222 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1223
1224 removelargefiles(repo.ui, repo, True, m, **opts)
1225 # Call into the normal add code, and any files that *should* be added as
1226 # largefiles will be
1227 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1228 # Now that we've handled largefiles, hand off to the original addremove
1229 # function to take care of the rest. Make sure it doesn't do anything with
1230 # largefiles by passing a matcher that will ignore them.
1231 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1232 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1233
1234 # Calling purge with --all will cause the largefiles to be deleted.
1235 # Override repo.status to prevent this from happening.
1236 def overridepurge(orig, ui, repo, *dirs, **opts):
1237 # XXX Monkey patching a repoview will not work. The assigned attribute will
1238 # be set on the unfiltered repo, but we will only lookup attributes in the
1239 # unfiltered repo if the lookup in the repoview object itself fails. As the
1240 # monkey patched method exists on the repoview class the lookup will not
1241 # fail. As a result, the original version will shadow the monkey patched
1242 # one, defeating the monkey patch.
1243 #
1244 # As a work around we use an unfiltered repo here. We should do something
1245 # cleaner instead.
1246 repo = repo.unfiltered()
1247 oldstatus = repo.status
1248 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1249 clean=False, unknown=False, listsubrepos=False):
1250 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1251 listsubrepos)
1252 lfdirstate = lfutil.openlfdirstate(ui, repo)
1253 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1254 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1255 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1256 unknown, ignored, r.clean)
1257 repo.status = overridestatus
1258 orig(ui, repo, *dirs, **opts)
1259 repo.status = oldstatus
1260 def overriderollback(orig, ui, repo, **opts):
1261 with repo.wlock():
1262 before = repo.dirstate.parents()
1263 orphans = set(f for f in repo.dirstate
1264 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1265 result = orig(ui, repo, **opts)
1266 after = repo.dirstate.parents()
1267 if before == after:
1268 return result # no need to restore standins
1269
1270 pctx = repo['.']
1271 for f in repo.dirstate:
1272 if lfutil.isstandin(f):
1273 orphans.discard(f)
1274 if repo.dirstate[f] == 'r':
1275 repo.wvfs.unlinkpath(f, ignoremissing=True)
1276 elif f in pctx:
1277 fctx = pctx[f]
1278 repo.wwrite(f, fctx.data(), fctx.flags())
1279 else:
1280 # content of standin is not so important in 'a',
1281 # 'm' or 'n' (coming from the 2nd parent) cases
1282 lfutil.writestandin(repo, f, '', False)
1283 for standin in orphans:
1284 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1285
1286 lfdirstate = lfutil.openlfdirstate(ui, repo)
1287 orphans = set(lfdirstate)
1288 lfiles = lfutil.listlfiles(repo)
1289 for file in lfiles:
1290 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1291 orphans.discard(file)
1292 for lfile in orphans:
1293 lfdirstate.drop(lfile)
1294 lfdirstate.write()
1295 return result
1296
1297 def overridetransplant(orig, ui, repo, *revs, **opts):
1298 resuming = opts.get('continue')
1299 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1300 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1301 try:
1302 result = orig(ui, repo, *revs, **opts)
1303 finally:
1304 repo._lfstatuswriters.pop()
1305 repo._lfcommithooks.pop()
1306 return result
1307
1308 def overridecat(orig, ui, repo, file1, *pats, **opts):
1309 ctx = scmutil.revsingle(repo, opts.get('rev'))
1310 err = 1
1311 notbad = set()
1312 m = scmutil.match(ctx, (file1,) + pats, opts)
1313 origmatchfn = m.matchfn
1314 def lfmatchfn(f):
1315 if origmatchfn(f):
1316 return True
1317 lf = lfutil.splitstandin(f)
1318 if lf is None:
1319 return False
1320 notbad.add(lf)
1321 return origmatchfn(lf)
1322 m.matchfn = lfmatchfn
1323 origbadfn = m.bad
1324 def lfbadfn(f, msg):
1325 if not f in notbad:
1326 origbadfn(f, msg)
1327 m.bad = lfbadfn
1328
1329 origvisitdirfn = m.visitdir
1330 def lfvisitdirfn(dir):
1331 if dir == lfutil.shortname:
1332 return True
1333 ret = origvisitdirfn(dir)
1334 if ret:
1335 return ret
1336 lf = lfutil.splitstandin(dir)
1337 if lf is None:
1338 return False
1339 return origvisitdirfn(lf)
1340 m.visitdir = lfvisitdirfn
1341
1342 for f in ctx.walk(m):
1343 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1344 pathname=f)
1345 lf = lfutil.splitstandin(f)
1346 if lf is None or origmatchfn(f):
1347 # duplicating unreachable code from commands.cat
1348 data = ctx[f].data()
1349 if opts.get('decode'):
1350 data = repo.wwritedata(f, data)
1351 fp.write(data)
1352 else:
1353 hash = lfutil.readstandin(repo, lf, ctx.rev())
1354 if not lfutil.inusercache(repo.ui, hash):
1355 store = storefactory._openstore(repo)
1355 store = storefactory.openstore(repo)
1356 success, missing = store.get([(lf, hash)])
1357 if len(success) != 1:
1358 raise error.Abort(
1359 _('largefile %s is not in cache and could not be '
1360 'downloaded') % lf)
1361 path = lfutil.usercachepath(repo.ui, hash)
1362 fpin = open(path, "rb")
1363 for chunk in util.filechunkiter(fpin, 128 * 1024):
1364 fp.write(chunk)
1365 fpin.close()
1366 fp.close()
1367 err = 0
1368 return err
1369
1370 def mergeupdate(orig, repo, node, branchmerge, force,
1371 *args, **kwargs):
1372 matcher = kwargs.get('matcher', None)
1373 # note if this is a partial update
1374 partial = matcher and not matcher.always()
1375 with repo.wlock():
1376 # branch | | |
1377 # merge | force | partial | action
1378 # -------+-------+---------+--------------
1379 # x | x | x | linear-merge
1380 # o | x | x | branch-merge
1381 # x | o | x | overwrite (as clean update)
1382 # o | o | x | force-branch-merge (*1)
1383 # x | x | o | (*)
1384 # o | x | o | (*)
1385 # x | o | o | overwrite (as revert)
1386 # o | o | o | (*)
1387 #
1388 # (*) don't care
1389 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1390
1391 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1392 unsure, s = lfdirstate.status(matchmod.always(repo.root,
1393 repo.getcwd()),
1394 [], False, False, False)
1395 pctx = repo['.']
1396 for lfile in unsure + s.modified:
1397 lfileabs = repo.wvfs.join(lfile)
1398 if not repo.wvfs.exists(lfileabs):
1399 continue
1400 lfhash = lfutil.hashrepofile(repo, lfile)
1401 standin = lfutil.standin(lfile)
1402 lfutil.writestandin(repo, standin, lfhash,
1403 lfutil.getexecutable(lfileabs))
1404 if (standin in pctx and
1405 lfhash == lfutil.readstandin(repo, lfile, '.')):
1406 lfdirstate.normal(lfile)
1407 for lfile in s.added:
1408 lfutil.updatestandin(repo, lfutil.standin(lfile))
1409 lfdirstate.write()
1410
1411 oldstandins = lfutil.getstandinsstate(repo)
1412
1413 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1414
1415 newstandins = lfutil.getstandinsstate(repo)
1416 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1417 if branchmerge or force or partial:
1418 filelist.extend(s.deleted + s.removed)
1419
1420 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1421 normallookup=partial)
1422
1423 return result
1424
1425 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1426 result = orig(repo, files, *args, **kwargs)
1427
1428 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1429 if filelist:
1430 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1431 printmessage=False, normallookup=True)
1432
1433 return result
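
The hooks above share one pattern worth calling out: _getoutgoings never walks the outgoing changesets itself. It hands a dedup callback to lfutil.getlfilestoupload, batches the unique hashes, asks the remote store once through the now-public storefactory.openstore(repo, other).exists(lfhashes), and only then reports each missing (filename, hash) pair through addfunc. Below is a minimal standalone sketch of that callback-and-batch pattern; it is plain Python with no Mercurial imports, and candidates and remote_has are hypothetical stand-ins for the outgoing walk and for openstore(...).exists().

def getoutgoings_sketch(candidates, remote_has, addfunc):
    # candidates: iterable of (filename, largefile hash) pairs, possibly with
    # repeats, playing the role of the outgoing-revision walk.
    # remote_has: callable taking a set of hashes and returning {hash: bool},
    # standing in for storefactory.openstore(repo, other).exists().
    knowns = set()
    lfhashes = set()
    def dedup(fn, lfhash):
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)
    for fn, lfhash in candidates:
        dedup(fn, lfhash)
    if lfhashes:
        lfexists = remote_has(lfhashes)       # one batched remote query
        for fn, lfhash in knowns:
            if not lfexists[lfhash]:          # hash missing on the remote
                addfunc(fn, lfhash)

def report(fn, lfhash):
    print('%s %s' % (fn, lfhash))

# Toy run: 'h1' is already on the remote, so only b.bin / h2 is reported.
getoutgoings_sketch([('a.bin', 'h1'), ('a.bin', 'h1'), ('b.bin', 'h2')],
                    lambda hashes: {'h1': True, 'h2': False},
                    report)

The hunk that follows (@@ -1,78 +1,78) is the store-factory module itself, where _openstore is renamed to the public openstore that these call sites now use.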
@@ -1,78 +1,78 @@
1 # This software may be used and distributed according to the terms of the
2 # GNU General Public License version 2 or any later version.
3
4 from __future__ import absolute_import
5
6 import re
7
8 from mercurial.i18n import _
9
10 from mercurial import (
11 error,
12 hg,
13 util,
14 )
15
16 from . import (
17 lfutil,
18 localstore,
19 wirestore,
20 )
21
22 # During clone this function is passed the src's ui object
23 # but it needs the dest's ui object so it can read out of
24 # the config file. Use repo.ui instead.
25 def _openstore(repo, remote=None, put=False):
25 def openstore(repo, remote=None, put=False):
26 ui = repo.ui
27
28 if not remote:
29 lfpullsource = getattr(repo, 'lfpullsource', None)
30 if lfpullsource:
31 path = ui.expandpath(lfpullsource)
32 elif put:
33 path = ui.expandpath('default-push', 'default')
34 else:
35 path = ui.expandpath('default')
36
37 # ui.expandpath() leaves 'default-push' and 'default' alone if
38 # they cannot be expanded: fallback to the empty string,
39 # meaning the current directory.
40 if path == 'default-push' or path == 'default':
41 path = ''
42 remote = repo
43 else:
44 path, _branches = hg.parseurl(path)
45 remote = hg.peer(repo, {}, path)
46
47 # The path could be a scheme so use Mercurial's normal functionality
48 # to resolve the scheme to a repository and use its path
49 path = util.safehasattr(remote, 'url') and remote.url() or remote.path
50
51 match = _scheme_re.match(path)
52 if not match: # regular filesystem path
53 scheme = 'file'
54 else:
55 scheme = match.group(1)
56
57 try:
58 storeproviders = _storeprovider[scheme]
59 except KeyError:
60 raise error.Abort(_('unsupported URL scheme %r') % scheme)
61
62 for classobj in storeproviders:
63 try:
64 return classobj(ui, repo, remote)
65 except lfutil.storeprotonotcapable:
66 pass
67
68 raise error.Abort(_('%s does not appear to be a largefile store') %
69 util.hidepassword(path))
70
71 _storeprovider = {
72 'file': [localstore.localstore],
73 'http': [wirestore.wirestore],
74 'https': [wirestore.wirestore],
75 'ssh': [wirestore.wirestore],
76 }
77
78 _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
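
The rename above is the substance of the patch: the leading underscore comes off _openstore, so callers outside this module (including the overrides.py call sites earlier in this diff) get a supported entry point rather than a private helper. A hedged usage sketch follows; it assumes Mercurial with the largefiles extension is importable, and the repository path and the 40-hex hash are placeholders, not values taken from this changeset.

# Sketch only, not part of the patch: resolve the default store for a
# local repository and ask which largefile hashes it already holds.
from mercurial import hg, ui as uimod
from hgext.largefiles import storefactory

repo = hg.repository(uimod.ui(), '/path/to/repo')   # placeholder path
store = storefactory.openstore(repo)   # formerly storefactory._openstore(repo)

# exists() takes an iterable of hashes and returns a {hash: bool} mapping;
# get() (used by overridecat above) downloads [(filename, hash)] pairs.
print(store.exists(['deadbeef' * 5]))   # placeholder 40-hex hash

Note that openstore(repo, put=True) resolves the remote against 'default-push' before 'default', per the expandpath logic shown above, while a plain openstore(repo) falls back to the repository itself and hence the local store.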