upgrade: split definition and management of the actions from the main code...
marmoute
r46662:4b89cf08 default
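The hunk below is from the largefiles extension's overrides module (per the docstring visible in the hunk). The only change the patch makes in this file is to retarget one import: the upgrade action definitions are no longer reached through the top-level mercurial.upgrade module but through the new mercurial.upgrade_utils package. A minimal sketch of the migration, using only the module paths and the upgrade_actions alias that appear in the hunk itself:

    # Before this changeset: action definitions were reached through the
    # top-level upgrade module.
    from mercurial import upgrade

    # After this changeset: the definitions live in a dedicated submodule,
    # imported here under the upgrade_actions alias.
    from mercurial.upgrade_utils import actions as upgrade_actions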
@@ -1,1861 +1,1864 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 '''Overridden Mercurial commands and functions for the largefiles extension'''
 from __future__ import absolute_import
 
 import copy
 import os
 
 from mercurial.i18n import _
 
 from mercurial.pycompat import open
 
 from mercurial.hgweb import webcommands
 
 from mercurial import (
     archival,
     cmdutil,
     copies as copiesmod,
     error,
     exchange,
     extensions,
     exthelper,
     filemerge,
     hg,
     logcmdutil,
     match as matchmod,
     merge,
     mergestate as mergestatemod,
     pathutil,
     pycompat,
     scmutil,
     smartset,
     subrepo,
-    upgrade,
     url as urlmod,
     util,
 )
 
+from mercurial.upgrade_utils import (
+    actions as upgrade_actions,
+)
+
45 from . import (
48 from . import (
46 lfcommands,
49 lfcommands,
47 lfutil,
50 lfutil,
48 storefactory,
51 storefactory,
49 )
52 )
50
53
51 eh = exthelper.exthelper()
54 eh = exthelper.exthelper()
52
55
53 lfstatus = lfutil.lfstatus
56 lfstatus = lfutil.lfstatus
54
57
55 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
58 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
56
59
57 # -- Utility functions: commonly/repeatedly needed functionality ---------------
60 # -- Utility functions: commonly/repeatedly needed functionality ---------------
58
61
59
62
60 def composelargefilematcher(match, manifest):
63 def composelargefilematcher(match, manifest):
61 """create a matcher that matches only the largefiles in the original
64 """create a matcher that matches only the largefiles in the original
62 matcher"""
65 matcher"""
63 m = copy.copy(match)
66 m = copy.copy(match)
64 lfile = lambda f: lfutil.standin(f) in manifest
67 lfile = lambda f: lfutil.standin(f) in manifest
65 m._files = [lf for lf in m._files if lfile(lf)]
68 m._files = [lf for lf in m._files if lfile(lf)]
66 m._fileset = set(m._files)
69 m._fileset = set(m._files)
67 m.always = lambda: False
70 m.always = lambda: False
68 origmatchfn = m.matchfn
71 origmatchfn = m.matchfn
69 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
72 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
70 return m
73 return m
71
74
72
75
73 def composenormalfilematcher(match, manifest, exclude=None):
76 def composenormalfilematcher(match, manifest, exclude=None):
74 excluded = set()
77 excluded = set()
75 if exclude is not None:
78 if exclude is not None:
76 excluded.update(exclude)
79 excluded.update(exclude)
77
80
78 m = copy.copy(match)
81 m = copy.copy(match)
79 notlfile = lambda f: not (
82 notlfile = lambda f: not (
80 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
83 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
81 )
84 )
82 m._files = [lf for lf in m._files if notlfile(lf)]
85 m._files = [lf for lf in m._files if notlfile(lf)]
83 m._fileset = set(m._files)
86 m._fileset = set(m._files)
84 m.always = lambda: False
87 m.always = lambda: False
85 origmatchfn = m.matchfn
88 origmatchfn = m.matchfn
86 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
89 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
87 return m
90 return m
88
91
89
92
90 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
93 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
91 large = opts.get('large')
94 large = opts.get('large')
92 lfsize = lfutil.getminsize(
95 lfsize = lfutil.getminsize(
93 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
96 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
94 )
97 )
95
98
96 lfmatcher = None
99 lfmatcher = None
97 if lfutil.islfilesrepo(repo):
100 if lfutil.islfilesrepo(repo):
98 lfpats = ui.configlist(lfutil.longname, b'patterns')
101 lfpats = ui.configlist(lfutil.longname, b'patterns')
99 if lfpats:
102 if lfpats:
100 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
103 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
101
104
102 lfnames = []
105 lfnames = []
103 m = matcher
106 m = matcher
104
107
105 wctx = repo[None]
108 wctx = repo[None]
106 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
109 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
107 exact = m.exact(f)
110 exact = m.exact(f)
108 lfile = lfutil.standin(f) in wctx
111 lfile = lfutil.standin(f) in wctx
109 nfile = f in wctx
112 nfile = f in wctx
110 exists = lfile or nfile
113 exists = lfile or nfile
111
114
112 # Don't warn the user when they attempt to add a normal tracked file.
115 # Don't warn the user when they attempt to add a normal tracked file.
113 # The normal add code will do that for us.
116 # The normal add code will do that for us.
114 if exact and exists:
117 if exact and exists:
115 if lfile:
118 if lfile:
116 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
119 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
117 continue
120 continue
118
121
119 if (exact or not exists) and not lfutil.isstandin(f):
122 if (exact or not exists) and not lfutil.isstandin(f):
120 # In case the file was removed previously, but not committed
123 # In case the file was removed previously, but not committed
121 # (issue3507)
124 # (issue3507)
122 if not repo.wvfs.exists(f):
125 if not repo.wvfs.exists(f):
123 continue
126 continue
124
127
125 abovemin = (
128 abovemin = (
126 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
129 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
127 )
130 )
128 if large or abovemin or (lfmatcher and lfmatcher(f)):
131 if large or abovemin or (lfmatcher and lfmatcher(f)):
129 lfnames.append(f)
132 lfnames.append(f)
130 if ui.verbose or not exact:
133 if ui.verbose or not exact:
131 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
134 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
132
135
133 bad = []
136 bad = []
134
137
135 # Need to lock, otherwise there could be a race condition between
138 # Need to lock, otherwise there could be a race condition between
136 # when standins are created and added to the repo.
139 # when standins are created and added to the repo.
137 with repo.wlock():
140 with repo.wlock():
138 if not opts.get('dry_run'):
141 if not opts.get('dry_run'):
139 standins = []
142 standins = []
140 lfdirstate = lfutil.openlfdirstate(ui, repo)
143 lfdirstate = lfutil.openlfdirstate(ui, repo)
141 for f in lfnames:
144 for f in lfnames:
142 standinname = lfutil.standin(f)
145 standinname = lfutil.standin(f)
143 lfutil.writestandin(
146 lfutil.writestandin(
144 repo,
147 repo,
145 standinname,
148 standinname,
146 hash=b'',
149 hash=b'',
147 executable=lfutil.getexecutable(repo.wjoin(f)),
150 executable=lfutil.getexecutable(repo.wjoin(f)),
148 )
151 )
149 standins.append(standinname)
152 standins.append(standinname)
150 if lfdirstate[f] == b'r':
153 if lfdirstate[f] == b'r':
151 lfdirstate.normallookup(f)
154 lfdirstate.normallookup(f)
152 else:
155 else:
153 lfdirstate.add(f)
156 lfdirstate.add(f)
154 lfdirstate.write()
157 lfdirstate.write()
155 bad += [
158 bad += [
156 lfutil.splitstandin(f)
159 lfutil.splitstandin(f)
157 for f in repo[None].add(standins)
160 for f in repo[None].add(standins)
158 if f in m.files()
161 if f in m.files()
159 ]
162 ]
160
163
161 added = [f for f in lfnames if f not in bad]
164 added = [f for f in lfnames if f not in bad]
162 return added, bad
165 return added, bad
163
166
164
167
165 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
168 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
166 after = opts.get('after')
169 after = opts.get('after')
167 m = composelargefilematcher(matcher, repo[None].manifest())
170 m = composelargefilematcher(matcher, repo[None].manifest())
168 with lfstatus(repo):
171 with lfstatus(repo):
169 s = repo.status(match=m, clean=not isaddremove)
172 s = repo.status(match=m, clean=not isaddremove)
170 manifest = repo[None].manifest()
173 manifest = repo[None].manifest()
171 modified, added, deleted, clean = [
174 modified, added, deleted, clean = [
172 [f for f in list if lfutil.standin(f) in manifest]
175 [f for f in list if lfutil.standin(f) in manifest]
173 for list in (s.modified, s.added, s.deleted, s.clean)
176 for list in (s.modified, s.added, s.deleted, s.clean)
174 ]
177 ]
175
178
176 def warn(files, msg):
179 def warn(files, msg):
177 for f in files:
180 for f in files:
178 ui.warn(msg % uipathfn(f))
181 ui.warn(msg % uipathfn(f))
179 return int(len(files) > 0)
182 return int(len(files) > 0)
180
183
181 if after:
184 if after:
182 remove = deleted
185 remove = deleted
183 result = warn(
186 result = warn(
184 modified + added + clean, _(b'not removing %s: file still exists\n')
187 modified + added + clean, _(b'not removing %s: file still exists\n')
185 )
188 )
186 else:
189 else:
187 remove = deleted + clean
190 remove = deleted + clean
188 result = warn(
191 result = warn(
189 modified,
192 modified,
190 _(
193 _(
191 b'not removing %s: file is modified (use -f'
194 b'not removing %s: file is modified (use -f'
192 b' to force removal)\n'
195 b' to force removal)\n'
193 ),
196 ),
194 )
197 )
195 result = (
198 result = (
196 warn(
199 warn(
197 added,
200 added,
198 _(
201 _(
199 b'not removing %s: file has been marked for add'
202 b'not removing %s: file has been marked for add'
200 b' (use forget to undo)\n'
203 b' (use forget to undo)\n'
201 ),
204 ),
202 )
205 )
203 or result
206 or result
204 )
207 )
205
208
206 # Need to lock because standin files are deleted then removed from the
209 # Need to lock because standin files are deleted then removed from the
207 # repository and we could race in-between.
210 # repository and we could race in-between.
208 with repo.wlock():
211 with repo.wlock():
209 lfdirstate = lfutil.openlfdirstate(ui, repo)
212 lfdirstate = lfutil.openlfdirstate(ui, repo)
210 for f in sorted(remove):
213 for f in sorted(remove):
211 if ui.verbose or not m.exact(f):
214 if ui.verbose or not m.exact(f):
212 ui.status(_(b'removing %s\n') % uipathfn(f))
215 ui.status(_(b'removing %s\n') % uipathfn(f))
213
216
214 if not dryrun:
217 if not dryrun:
215 if not after:
218 if not after:
216 repo.wvfs.unlinkpath(f, ignoremissing=True)
219 repo.wvfs.unlinkpath(f, ignoremissing=True)
217
220
218 if dryrun:
221 if dryrun:
219 return result
222 return result
220
223
221 remove = [lfutil.standin(f) for f in remove]
224 remove = [lfutil.standin(f) for f in remove]
222 # If this is being called by addremove, let the original addremove
225 # If this is being called by addremove, let the original addremove
223 # function handle this.
226 # function handle this.
224 if not isaddremove:
227 if not isaddremove:
225 for f in remove:
228 for f in remove:
226 repo.wvfs.unlinkpath(f, ignoremissing=True)
229 repo.wvfs.unlinkpath(f, ignoremissing=True)
227 repo[None].forget(remove)
230 repo[None].forget(remove)
228
231
229 for f in remove:
232 for f in remove:
230 lfutil.synclfdirstate(
233 lfutil.synclfdirstate(
231 repo, lfdirstate, lfutil.splitstandin(f), False
234 repo, lfdirstate, lfutil.splitstandin(f), False
232 )
235 )
233
236
234 lfdirstate.write()
237 lfdirstate.write()
235
238
236 return result
239 return result
237
240
238
241
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
242 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 # appear at their right place in the manifests.
243 # appear at their right place in the manifests.
241 @eh.wrapfunction(webcommands, b'decodepath')
244 @eh.wrapfunction(webcommands, b'decodepath')
242 def decodepath(orig, path):
245 def decodepath(orig, path):
243 return lfutil.splitstandin(path) or path
246 return lfutil.splitstandin(path) or path
244
247
245
248
246 # -- Wrappers: modify existing commands --------------------------------
249 # -- Wrappers: modify existing commands --------------------------------
247
250
248
251
249 @eh.wrapcommand(
252 @eh.wrapcommand(
250 b'add',
253 b'add',
251 opts=[
254 opts=[
252 (b'', b'large', None, _(b'add as largefile')),
255 (b'', b'large', None, _(b'add as largefile')),
253 (b'', b'normal', None, _(b'add as normal file')),
256 (b'', b'normal', None, _(b'add as normal file')),
254 (
257 (
255 b'',
258 b'',
256 b'lfsize',
259 b'lfsize',
257 b'',
260 b'',
258 _(
261 _(
259 b'add all files above this size (in megabytes) '
262 b'add all files above this size (in megabytes) '
260 b'as largefiles (default: 10)'
263 b'as largefiles (default: 10)'
261 ),
264 ),
262 ),
265 ),
263 ],
266 ],
264 )
267 )
265 def overrideadd(orig, ui, repo, *pats, **opts):
268 def overrideadd(orig, ui, repo, *pats, **opts):
266 if opts.get('normal') and opts.get('large'):
269 if opts.get('normal') and opts.get('large'):
267 raise error.Abort(_(b'--normal cannot be used with --large'))
270 raise error.Abort(_(b'--normal cannot be used with --large'))
268 return orig(ui, repo, *pats, **opts)
271 return orig(ui, repo, *pats, **opts)
269
272
270
273
271 @eh.wrapfunction(cmdutil, b'add')
274 @eh.wrapfunction(cmdutil, b'add')
272 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
275 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
273 # The --normal flag short circuits this override
276 # The --normal flag short circuits this override
274 if opts.get('normal'):
277 if opts.get('normal'):
275 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
278 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
276
279
277 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
280 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
278 normalmatcher = composenormalfilematcher(
281 normalmatcher = composenormalfilematcher(
279 matcher, repo[None].manifest(), ladded
282 matcher, repo[None].manifest(), ladded
280 )
283 )
281 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
284 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
282
285
283 bad.extend(f for f in lbad)
286 bad.extend(f for f in lbad)
284 return bad
287 return bad
285
288
286
289
287 @eh.wrapfunction(cmdutil, b'remove')
290 @eh.wrapfunction(cmdutil, b'remove')
288 def cmdutilremove(
291 def cmdutilremove(
289 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
292 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
290 ):
293 ):
291 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
294 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
292 result = orig(
295 result = orig(
293 ui,
296 ui,
294 repo,
297 repo,
295 normalmatcher,
298 normalmatcher,
296 prefix,
299 prefix,
297 uipathfn,
300 uipathfn,
298 after,
301 after,
299 force,
302 force,
300 subrepos,
303 subrepos,
301 dryrun,
304 dryrun,
302 )
305 )
303 return (
306 return (
304 removelargefiles(
307 removelargefiles(
305 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
308 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
306 )
309 )
307 or result
310 or result
308 )
311 )
309
312
310
313
311 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
314 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
312 def overridestatusfn(orig, repo, rev2, **opts):
315 def overridestatusfn(orig, repo, rev2, **opts):
313 with lfstatus(repo._repo):
316 with lfstatus(repo._repo):
314 return orig(repo, rev2, **opts)
317 return orig(repo, rev2, **opts)
315
318
316
319
317 @eh.wrapcommand(b'status')
320 @eh.wrapcommand(b'status')
318 def overridestatus(orig, ui, repo, *pats, **opts):
321 def overridestatus(orig, ui, repo, *pats, **opts):
319 with lfstatus(repo):
322 with lfstatus(repo):
320 return orig(ui, repo, *pats, **opts)
323 return orig(ui, repo, *pats, **opts)
321
324
322
325
323 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
326 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
324 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
327 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
325 with lfstatus(repo._repo):
328 with lfstatus(repo._repo):
326 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
329 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
327
330
328
331
329 @eh.wrapcommand(b'log')
332 @eh.wrapcommand(b'log')
330 def overridelog(orig, ui, repo, *pats, **opts):
333 def overridelog(orig, ui, repo, *pats, **opts):
331 def overridematchandpats(
334 def overridematchandpats(
332 orig,
335 orig,
333 ctx,
336 ctx,
334 pats=(),
337 pats=(),
335 opts=None,
338 opts=None,
336 globbed=False,
339 globbed=False,
337 default=b'relpath',
340 default=b'relpath',
338 badfn=None,
341 badfn=None,
339 ):
342 ):
340 """Matcher that merges root directory with .hglf, suitable for log.
343 """Matcher that merges root directory with .hglf, suitable for log.
341 It is still possible to match .hglf directly.
344 It is still possible to match .hglf directly.
342 For any listed files run log on the standin too.
345 For any listed files run log on the standin too.
343 matchfn tries both the given filename and with .hglf stripped.
346 matchfn tries both the given filename and with .hglf stripped.
344 """
347 """
345 if opts is None:
348 if opts is None:
346 opts = {}
349 opts = {}
347 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
350 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
348 m, p = copy.copy(matchandpats)
351 m, p = copy.copy(matchandpats)
349
352
350 if m.always():
353 if m.always():
351 # We want to match everything anyway, so there's no benefit trying
354 # We want to match everything anyway, so there's no benefit trying
352 # to add standins.
355 # to add standins.
353 return matchandpats
356 return matchandpats
354
357
355 pats = set(p)
358 pats = set(p)
356
359
357 def fixpats(pat, tostandin=lfutil.standin):
360 def fixpats(pat, tostandin=lfutil.standin):
358 if pat.startswith(b'set:'):
361 if pat.startswith(b'set:'):
359 return pat
362 return pat
360
363
361 kindpat = matchmod._patsplit(pat, None)
364 kindpat = matchmod._patsplit(pat, None)
362
365
363 if kindpat[0] is not None:
366 if kindpat[0] is not None:
364 return kindpat[0] + b':' + tostandin(kindpat[1])
367 return kindpat[0] + b':' + tostandin(kindpat[1])
365 return tostandin(kindpat[1])
368 return tostandin(kindpat[1])
366
369
367 cwd = repo.getcwd()
370 cwd = repo.getcwd()
368 if cwd:
371 if cwd:
369 hglf = lfutil.shortname
372 hglf = lfutil.shortname
370 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
373 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
371
374
372 def tostandin(f):
375 def tostandin(f):
373 # The file may already be a standin, so truncate the back
376 # The file may already be a standin, so truncate the back
374 # prefix and test before mangling it. This avoids turning
377 # prefix and test before mangling it. This avoids turning
375 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
378 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
376 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
379 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
377 return f
380 return f
378
381
379 # An absolute path is from outside the repo, so truncate the
382 # An absolute path is from outside the repo, so truncate the
380 # path to the root before building the standin. Otherwise cwd
383 # path to the root before building the standin. Otherwise cwd
381 # is somewhere in the repo, relative to root, and needs to be
384 # is somewhere in the repo, relative to root, and needs to be
382 # prepended before building the standin.
385 # prepended before building the standin.
383 if os.path.isabs(cwd):
386 if os.path.isabs(cwd):
384 f = f[len(back) :]
387 f = f[len(back) :]
385 else:
388 else:
386 f = cwd + b'/' + f
389 f = cwd + b'/' + f
387 return back + lfutil.standin(f)
390 return back + lfutil.standin(f)
388
391
389 else:
392 else:
390
393
391 def tostandin(f):
394 def tostandin(f):
392 if lfutil.isstandin(f):
395 if lfutil.isstandin(f):
393 return f
396 return f
394 return lfutil.standin(f)
397 return lfutil.standin(f)
395
398
396 pats.update(fixpats(f, tostandin) for f in p)
399 pats.update(fixpats(f, tostandin) for f in p)
397
400
398 for i in range(0, len(m._files)):
401 for i in range(0, len(m._files)):
399 # Don't add '.hglf' to m.files, since that is already covered by '.'
402 # Don't add '.hglf' to m.files, since that is already covered by '.'
400 if m._files[i] == b'.':
403 if m._files[i] == b'.':
401 continue
404 continue
402 standin = lfutil.standin(m._files[i])
405 standin = lfutil.standin(m._files[i])
403 # If the "standin" is a directory, append instead of replace to
406 # If the "standin" is a directory, append instead of replace to
404 # support naming a directory on the command line with only
407 # support naming a directory on the command line with only
405 # largefiles. The original directory is kept to support normal
408 # largefiles. The original directory is kept to support normal
406 # files.
409 # files.
407 if standin in ctx:
410 if standin in ctx:
408 m._files[i] = standin
411 m._files[i] = standin
409 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
412 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
410 m._files.append(standin)
413 m._files.append(standin)
411
414
412 m._fileset = set(m._files)
415 m._fileset = set(m._files)
413 m.always = lambda: False
416 m.always = lambda: False
414 origmatchfn = m.matchfn
417 origmatchfn = m.matchfn
415
418
416 def lfmatchfn(f):
419 def lfmatchfn(f):
417 lf = lfutil.splitstandin(f)
420 lf = lfutil.splitstandin(f)
418 if lf is not None and origmatchfn(lf):
421 if lf is not None and origmatchfn(lf):
419 return True
422 return True
420 r = origmatchfn(f)
423 r = origmatchfn(f)
421 return r
424 return r
422
425
423 m.matchfn = lfmatchfn
426 m.matchfn = lfmatchfn
424
427
425 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
428 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
426 return m, pats
429 return m, pats
427
430
428 # For hg log --patch, the match object is used in two different senses:
431 # For hg log --patch, the match object is used in two different senses:
429 # (1) to determine what revisions should be printed out, and
432 # (1) to determine what revisions should be printed out, and
430 # (2) to determine what files to print out diffs for.
433 # (2) to determine what files to print out diffs for.
431 # The magic matchandpats override should be used for case (1) but not for
434 # The magic matchandpats override should be used for case (1) but not for
432 # case (2).
435 # case (2).
433 oldmatchandpats = scmutil.matchandpats
436 oldmatchandpats = scmutil.matchandpats
434
437
435 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
438 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
436 wctx = repo[None]
439 wctx = repo[None]
437 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
440 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
438 return lambda ctx: match
441 return lambda ctx: match
439
442
440 wrappedmatchandpats = extensions.wrappedfunction(
443 wrappedmatchandpats = extensions.wrappedfunction(
441 scmutil, b'matchandpats', overridematchandpats
444 scmutil, b'matchandpats', overridematchandpats
442 )
445 )
443 wrappedmakefilematcher = extensions.wrappedfunction(
446 wrappedmakefilematcher = extensions.wrappedfunction(
444 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
447 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
445 )
448 )
446 with wrappedmatchandpats, wrappedmakefilematcher:
449 with wrappedmatchandpats, wrappedmakefilematcher:
447 return orig(ui, repo, *pats, **opts)
450 return orig(ui, repo, *pats, **opts)
448
451
449
452
450 @eh.wrapcommand(
453 @eh.wrapcommand(
451 b'verify',
454 b'verify',
452 opts=[
455 opts=[
453 (
456 (
454 b'',
457 b'',
455 b'large',
458 b'large',
456 None,
459 None,
457 _(b'verify that all largefiles in current revision exists'),
460 _(b'verify that all largefiles in current revision exists'),
458 ),
461 ),
459 (
462 (
460 b'',
463 b'',
461 b'lfa',
464 b'lfa',
462 None,
465 None,
463 _(b'verify largefiles in all revisions, not just current'),
466 _(b'verify largefiles in all revisions, not just current'),
464 ),
467 ),
465 (
468 (
466 b'',
469 b'',
467 b'lfc',
470 b'lfc',
468 None,
471 None,
469 _(b'verify local largefile contents, not just existence'),
472 _(b'verify local largefile contents, not just existence'),
470 ),
473 ),
471 ],
474 ],
472 )
475 )
473 def overrideverify(orig, ui, repo, *pats, **opts):
476 def overrideverify(orig, ui, repo, *pats, **opts):
474 large = opts.pop('large', False)
477 large = opts.pop('large', False)
475 all = opts.pop('lfa', False)
478 all = opts.pop('lfa', False)
476 contents = opts.pop('lfc', False)
479 contents = opts.pop('lfc', False)
477
480
478 result = orig(ui, repo, *pats, **opts)
481 result = orig(ui, repo, *pats, **opts)
479 if large or all or contents:
482 if large or all or contents:
480 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
483 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
481 return result
484 return result
482
485
483
486
484 @eh.wrapcommand(
487 @eh.wrapcommand(
485 b'debugstate',
488 b'debugstate',
486 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
489 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
487 )
490 )
488 def overridedebugstate(orig, ui, repo, *pats, **opts):
491 def overridedebugstate(orig, ui, repo, *pats, **opts):
489 large = opts.pop('large', False)
492 large = opts.pop('large', False)
490 if large:
493 if large:
491
494
492 class fakerepo(object):
495 class fakerepo(object):
493 dirstate = lfutil.openlfdirstate(ui, repo)
496 dirstate = lfutil.openlfdirstate(ui, repo)
494
497
495 orig(ui, fakerepo, *pats, **opts)
498 orig(ui, fakerepo, *pats, **opts)
496 else:
499 else:
497 orig(ui, repo, *pats, **opts)
500 orig(ui, repo, *pats, **opts)
498
501
499
502
500 # Before starting the manifest merge, merge.updates will call
503 # Before starting the manifest merge, merge.updates will call
501 # _checkunknownfile to check if there are any files in the merged-in
504 # _checkunknownfile to check if there are any files in the merged-in
502 # changeset that collide with unknown files in the working copy.
505 # changeset that collide with unknown files in the working copy.
503 #
506 #
504 # The largefiles are seen as unknown, so this prevents us from merging
507 # The largefiles are seen as unknown, so this prevents us from merging
505 # in a file 'foo' if we already have a largefile with the same name.
508 # in a file 'foo' if we already have a largefile with the same name.
506 #
509 #
507 # The overridden function filters the unknown files by removing any
510 # The overridden function filters the unknown files by removing any
508 # largefiles. This makes the merge proceed and we can then handle this
511 # largefiles. This makes the merge proceed and we can then handle this
509 # case further in the overridden calculateupdates function below.
512 # case further in the overridden calculateupdates function below.
510 @eh.wrapfunction(merge, b'_checkunknownfile')
513 @eh.wrapfunction(merge, b'_checkunknownfile')
511 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
514 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
512 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
515 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
513 return False
516 return False
514 return origfn(repo, wctx, mctx, f, f2)
517 return origfn(repo, wctx, mctx, f, f2)
515
518
516
519
517 # The manifest merge handles conflicts on the manifest level. We want
520 # The manifest merge handles conflicts on the manifest level. We want
518 # to handle changes in largefile-ness of files at this level too.
521 # to handle changes in largefile-ness of files at this level too.
519 #
522 #
520 # The strategy is to run the original calculateupdates and then process
523 # The strategy is to run the original calculateupdates and then process
521 # the action list it outputs. There are two cases we need to deal with:
524 # the action list it outputs. There are two cases we need to deal with:
522 #
525 #
523 # 1. Normal file in p1, largefile in p2. Here the largefile is
526 # 1. Normal file in p1, largefile in p2. Here the largefile is
524 # detected via its standin file, which will enter the working copy
527 # detected via its standin file, which will enter the working copy
525 # with a "get" action. It is not "merge" since the standin is all
528 # with a "get" action. It is not "merge" since the standin is all
526 # Mercurial is concerned with at this level -- the link to the
529 # Mercurial is concerned with at this level -- the link to the
527 # existing normal file is not relevant here.
530 # existing normal file is not relevant here.
528 #
531 #
529 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
532 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
530 # since the largefile will be present in the working copy and
533 # since the largefile will be present in the working copy and
531 # different from the normal file in p2. Mercurial therefore
534 # different from the normal file in p2. Mercurial therefore
532 # triggers a merge action.
535 # triggers a merge action.
533 #
536 #
534 # In both cases, we prompt the user and emit new actions to either
537 # In both cases, we prompt the user and emit new actions to either
535 # remove the standin (if the normal file was kept) or to remove the
538 # remove the standin (if the normal file was kept) or to remove the
536 # normal file and get the standin (if the largefile was kept). The
539 # normal file and get the standin (if the largefile was kept). The
537 # default prompt answer is to use the largefile version since it was
540 # default prompt answer is to use the largefile version since it was
538 # presumably changed on purpose.
541 # presumably changed on purpose.
539 #
542 #
540 # Finally, the merge.applyupdates function will then take care of
543 # Finally, the merge.applyupdates function will then take care of
541 # writing the files into the working copy and lfcommands.updatelfiles
544 # writing the files into the working copy and lfcommands.updatelfiles
542 # will update the largefiles.
545 # will update the largefiles.
543 @eh.wrapfunction(merge, b'calculateupdates')
546 @eh.wrapfunction(merge, b'calculateupdates')
544 def overridecalculateupdates(
547 def overridecalculateupdates(
545 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
548 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
546 ):
549 ):
547 overwrite = force and not branchmerge
550 overwrite = force and not branchmerge
548 mresult = origfn(
551 mresult = origfn(
549 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
552 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
550 )
553 )
551
554
552 if overwrite:
555 if overwrite:
553 return mresult
556 return mresult
554
557
555 # Convert to dictionary with filename as key and action as value.
558 # Convert to dictionary with filename as key and action as value.
556 lfiles = set()
559 lfiles = set()
557 for f in mresult.files():
560 for f in mresult.files():
558 splitstandin = lfutil.splitstandin(f)
561 splitstandin = lfutil.splitstandin(f)
559 if splitstandin is not None and splitstandin in p1:
562 if splitstandin is not None and splitstandin in p1:
560 lfiles.add(splitstandin)
563 lfiles.add(splitstandin)
561 elif lfutil.standin(f) in p1:
564 elif lfutil.standin(f) in p1:
562 lfiles.add(f)
565 lfiles.add(f)
563
566
564 for lfile in sorted(lfiles):
567 for lfile in sorted(lfiles):
565 standin = lfutil.standin(lfile)
568 standin = lfutil.standin(lfile)
566 (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
569 (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
567 (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
570 (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
568 if sm in (b'g', b'dc') and lm != b'r':
571 if sm in (b'g', b'dc') and lm != b'r':
569 if sm == b'dc':
572 if sm == b'dc':
570 f1, f2, fa, move, anc = sargs
573 f1, f2, fa, move, anc = sargs
571 sargs = (p2[f2].flags(), False)
574 sargs = (p2[f2].flags(), False)
572 # Case 1: normal file in the working copy, largefile in
575 # Case 1: normal file in the working copy, largefile in
573 # the second parent
576 # the second parent
574 usermsg = (
577 usermsg = (
575 _(
578 _(
576 b'remote turned local normal file %s into a largefile\n'
579 b'remote turned local normal file %s into a largefile\n'
577 b'use (l)argefile or keep (n)ormal file?'
580 b'use (l)argefile or keep (n)ormal file?'
578 b'$$ &Largefile $$ &Normal file'
581 b'$$ &Largefile $$ &Normal file'
579 )
582 )
580 % lfile
583 % lfile
581 )
584 )
582 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
585 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
583 mresult.addfile(lfile, b'r', None, b'replaced by standin')
586 mresult.addfile(lfile, b'r', None, b'replaced by standin')
584 mresult.addfile(standin, b'g', sargs, b'replaces standin')
587 mresult.addfile(standin, b'g', sargs, b'replaces standin')
585 else: # keep local normal file
588 else: # keep local normal file
586 mresult.addfile(lfile, b'k', None, b'replaces standin')
589 mresult.addfile(lfile, b'k', None, b'replaces standin')
587 if branchmerge:
590 if branchmerge:
588 mresult.addfile(
591 mresult.addfile(
589 standin,
592 standin,
590 b'k',
593 b'k',
591 None,
594 None,
592 b'replaced by non-standin',
595 b'replaced by non-standin',
593 )
596 )
594 else:
597 else:
595 mresult.addfile(
598 mresult.addfile(
596 standin,
599 standin,
597 b'r',
600 b'r',
598 None,
601 None,
599 b'replaced by non-standin',
602 b'replaced by non-standin',
600 )
603 )
601 elif lm in (b'g', b'dc') and sm != b'r':
604 elif lm in (b'g', b'dc') and sm != b'r':
602 if lm == b'dc':
605 if lm == b'dc':
603 f1, f2, fa, move, anc = largs
606 f1, f2, fa, move, anc = largs
604 largs = (p2[f2].flags(), False)
607 largs = (p2[f2].flags(), False)
605 # Case 2: largefile in the working copy, normal file in
608 # Case 2: largefile in the working copy, normal file in
606 # the second parent
609 # the second parent
607 usermsg = (
610 usermsg = (
608 _(
611 _(
609 b'remote turned local largefile %s into a normal file\n'
612 b'remote turned local largefile %s into a normal file\n'
610 b'keep (l)argefile or use (n)ormal file?'
613 b'keep (l)argefile or use (n)ormal file?'
611 b'$$ &Largefile $$ &Normal file'
614 b'$$ &Largefile $$ &Normal file'
612 )
615 )
613 % lfile
616 % lfile
614 )
617 )
615 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
618 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
616 if branchmerge:
619 if branchmerge:
617 # largefile can be restored from standin safely
620 # largefile can be restored from standin safely
618 mresult.addfile(
621 mresult.addfile(
619 lfile,
622 lfile,
620 b'k',
623 b'k',
621 None,
624 None,
622 b'replaced by standin',
625 b'replaced by standin',
623 )
626 )
624 mresult.addfile(standin, b'k', None, b'replaces standin')
627 mresult.addfile(standin, b'k', None, b'replaces standin')
625 else:
628 else:
626 # "lfile" should be marked as "removed" without
629 # "lfile" should be marked as "removed" without
627 # removal of itself
630 # removal of itself
628 mresult.addfile(
631 mresult.addfile(
629 lfile,
632 lfile,
630 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
633 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
631 None,
634 None,
632 b'forget non-standin largefile',
635 b'forget non-standin largefile',
633 )
636 )
634
637
635 # linear-merge should treat this largefile as 're-added'
638 # linear-merge should treat this largefile as 're-added'
636 mresult.addfile(standin, b'a', None, b'keep standin')
639 mresult.addfile(standin, b'a', None, b'keep standin')
637 else: # pick remote normal file
640 else: # pick remote normal file
638 mresult.addfile(lfile, b'g', largs, b'replaces standin')
641 mresult.addfile(lfile, b'g', largs, b'replaces standin')
639 mresult.addfile(
642 mresult.addfile(
640 standin,
643 standin,
641 b'r',
644 b'r',
642 None,
645 None,
643 b'replaced by non-standin',
646 b'replaced by non-standin',
644 )
647 )
645
648
646 return mresult
649 return mresult
647
650
648
651
649 @eh.wrapfunction(mergestatemod, b'recordupdates')
652 @eh.wrapfunction(mergestatemod, b'recordupdates')
650 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
653 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
651 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
654 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
652 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
655 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
653 for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
656 for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
654 # this should be executed before 'orig', to execute 'remove'
657 # this should be executed before 'orig', to execute 'remove'
655 # before all other actions
658 # before all other actions
656 repo.dirstate.remove(lfile)
659 repo.dirstate.remove(lfile)
657 # make sure lfile doesn't get synclfdirstate'd as normal
660 # make sure lfile doesn't get synclfdirstate'd as normal
658 lfdirstate.add(lfile)
661 lfdirstate.add(lfile)
659 lfdirstate.write()
662 lfdirstate.write()
660
663
661 return orig(repo, actions, branchmerge, getfiledata)
664 return orig(repo, actions, branchmerge, getfiledata)
662
665
663
666
664 # Override filemerge to prompt the user about how they wish to merge
667 # Override filemerge to prompt the user about how they wish to merge
665 # largefiles. This will handle identical edits without prompting the user.
668 # largefiles. This will handle identical edits without prompting the user.
666 @eh.wrapfunction(filemerge, b'_filemerge')
669 @eh.wrapfunction(filemerge, b'_filemerge')
667 def overridefilemerge(
670 def overridefilemerge(
668 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
671 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
669 ):
672 ):
670 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
673 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
671 return origfn(
674 return origfn(
672 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
675 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
673 )
676 )
674
677
675 ahash = lfutil.readasstandin(fca).lower()
678 ahash = lfutil.readasstandin(fca).lower()
676 dhash = lfutil.readasstandin(fcd).lower()
679 dhash = lfutil.readasstandin(fcd).lower()
677 ohash = lfutil.readasstandin(fco).lower()
680 ohash = lfutil.readasstandin(fco).lower()
678 if (
681 if (
679 ohash != ahash
682 ohash != ahash
680 and ohash != dhash
683 and ohash != dhash
681 and (
684 and (
682 dhash == ahash
685 dhash == ahash
683 or repo.ui.promptchoice(
686 or repo.ui.promptchoice(
684 _(
687 _(
685 b'largefile %s has a merge conflict\nancestor was %s\n'
688 b'largefile %s has a merge conflict\nancestor was %s\n'
686 b'you can keep (l)ocal %s or take (o)ther %s.\n'
689 b'you can keep (l)ocal %s or take (o)ther %s.\n'
687 b'what do you want to do?'
690 b'what do you want to do?'
688 b'$$ &Local $$ &Other'
691 b'$$ &Local $$ &Other'
689 )
692 )
690 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
693 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
691 0,
694 0,
692 )
695 )
693 == 1
696 == 1
694 )
697 )
695 ):
698 ):
696 repo.wwrite(fcd.path(), fco.data(), fco.flags())
699 repo.wwrite(fcd.path(), fco.data(), fco.flags())
697 return True, 0, False
700 return True, 0, False
698
701
699
702
700 @eh.wrapfunction(copiesmod, b'pathcopies')
703 @eh.wrapfunction(copiesmod, b'pathcopies')
701 def copiespathcopies(orig, ctx1, ctx2, match=None):
704 def copiespathcopies(orig, ctx1, ctx2, match=None):
702 copies = orig(ctx1, ctx2, match=match)
705 copies = orig(ctx1, ctx2, match=match)
703 updated = {}
706 updated = {}
704
707
705 for k, v in pycompat.iteritems(copies):
708 for k, v in pycompat.iteritems(copies):
706 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
709 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
707
710
708 return updated
711 return updated
709
712
710
713
711 # Copy first changes the matchers to match standins instead of
714 # Copy first changes the matchers to match standins instead of
712 # largefiles. Then it overrides util.copyfile in that function it
715 # largefiles. Then it overrides util.copyfile in that function it
713 # checks if the destination largefile already exists. It also keeps a
716 # checks if the destination largefile already exists. It also keeps a
714 # list of copied files so that the largefiles can be copied and the
717 # list of copied files so that the largefiles can be copied and the
715 # dirstate updated.
718 # dirstate updated.
716 @eh.wrapfunction(cmdutil, b'copy')
719 @eh.wrapfunction(cmdutil, b'copy')
717 def overridecopy(orig, ui, repo, pats, opts, rename=False):
720 def overridecopy(orig, ui, repo, pats, opts, rename=False):
718 # doesn't remove largefile on rename
721 # doesn't remove largefile on rename
719 if len(pats) < 2:
722 if len(pats) < 2:
720 # this isn't legal, let the original function deal with it
723 # this isn't legal, let the original function deal with it
721 return orig(ui, repo, pats, opts, rename)
724 return orig(ui, repo, pats, opts, rename)
722
725
723 # This could copy both lfiles and normal files in one command,
726 # This could copy both lfiles and normal files in one command,
724 # but we don't want to do that. First replace their matcher to
727 # but we don't want to do that. First replace their matcher to
725 # only match normal files and run it, then replace it to just
728 # only match normal files and run it, then replace it to just
726 # match largefiles and run it again.
729 # match largefiles and run it again.
727 nonormalfiles = False
730 nonormalfiles = False
728 nolfiles = False
731 nolfiles = False
729 manifest = repo[None].manifest()
732 manifest = repo[None].manifest()
730
733
731 def normalfilesmatchfn(
734 def normalfilesmatchfn(
732 orig,
735 orig,
733 ctx,
736 ctx,
734 pats=(),
737 pats=(),
735 opts=None,
738 opts=None,
736 globbed=False,
739 globbed=False,
737 default=b'relpath',
740 default=b'relpath',
738 badfn=None,
741 badfn=None,
739 ):
742 ):
740 if opts is None:
743 if opts is None:
741 opts = {}
744 opts = {}
742 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
745 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
743 return composenormalfilematcher(match, manifest)
746 return composenormalfilematcher(match, manifest)
744
747
745 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
748 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
746 try:
749 try:
747 result = orig(ui, repo, pats, opts, rename)
750 result = orig(ui, repo, pats, opts, rename)
748 except error.Abort as e:
751 except error.Abort as e:
749 if e.message != _(b'no files to copy'):
752 if e.message != _(b'no files to copy'):
750 raise e
753 raise e
751 else:
754 else:
752 nonormalfiles = True
755 nonormalfiles = True
753 result = 0
756 result = 0
754
757
755 # The first rename can cause our current working directory to be removed.
758 # The first rename can cause our current working directory to be removed.
756 # In that case there is nothing left to copy/rename so just quit.
759 # In that case there is nothing left to copy/rename so just quit.
757 try:
760 try:
758 repo.getcwd()
761 repo.getcwd()
759 except OSError:
762 except OSError:
760 return result
763 return result
761
764
762 def makestandin(relpath):
765 def makestandin(relpath):
763 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
766 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
764 return repo.wvfs.join(lfutil.standin(path))
767 return repo.wvfs.join(lfutil.standin(path))
765
768
766 fullpats = scmutil.expandpats(pats)
769 fullpats = scmutil.expandpats(pats)
767 dest = fullpats[-1]
770 dest = fullpats[-1]
768
771
769 if os.path.isdir(dest):
772 if os.path.isdir(dest):
770 if not os.path.isdir(makestandin(dest)):
773 if not os.path.isdir(makestandin(dest)):
771 os.makedirs(makestandin(dest))
774 os.makedirs(makestandin(dest))
772
775
773 try:
776 try:
774 # When we call orig below it creates the standins but we don't add
777 # When we call orig below it creates the standins but we don't add
775 # them to the dir state until later so lock during that time.
778 # them to the dir state until later so lock during that time.
776 wlock = repo.wlock()
779 wlock = repo.wlock()
777
780
778 manifest = repo[None].manifest()
781 manifest = repo[None].manifest()
779
782
780 def overridematch(
783 def overridematch(
781 orig,
784 orig,
782 ctx,
785 ctx,
783 pats=(),
786 pats=(),
784 opts=None,
787 opts=None,
785 globbed=False,
788 globbed=False,
786 default=b'relpath',
789 default=b'relpath',
787 badfn=None,
790 badfn=None,
788 ):
791 ):
789 if opts is None:
792 if opts is None:
790 opts = {}
793 opts = {}
791 newpats = []
794 newpats = []
792 # The patterns were previously mangled to add the standin
795 # The patterns were previously mangled to add the standin
793 # directory; we need to remove that now
796 # directory; we need to remove that now
794 for pat in pats:
797 for pat in pats:
795 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
798 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
796 newpats.append(pat.replace(lfutil.shortname, b''))
799 newpats.append(pat.replace(lfutil.shortname, b''))
797 else:
800 else:
798 newpats.append(pat)
801 newpats.append(pat)
799 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
802 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
800 m = copy.copy(match)
803 m = copy.copy(match)
801 lfile = lambda f: lfutil.standin(f) in manifest
804 lfile = lambda f: lfutil.standin(f) in manifest
802 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
805 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
803 m._fileset = set(m._files)
806 m._fileset = set(m._files)
804 origmatchfn = m.matchfn
807 origmatchfn = m.matchfn
805
808
806 def matchfn(f):
809 def matchfn(f):
807 lfile = lfutil.splitstandin(f)
810 lfile = lfutil.splitstandin(f)
808 return (
811 return (
809 lfile is not None
812 lfile is not None
810 and (f in manifest)
813 and (f in manifest)
811 and origmatchfn(lfile)
814 and origmatchfn(lfile)
812 or None
815 or None
813 )
816 )
814
817
815 m.matchfn = matchfn
818 m.matchfn = matchfn
816 return m
819 return m
817
820
818 listpats = []
821 listpats = []
819 for pat in pats:
822 for pat in pats:
820 if matchmod.patkind(pat) is not None:
823 if matchmod.patkind(pat) is not None:
821 listpats.append(pat)
824 listpats.append(pat)
822 else:
825 else:
823 listpats.append(makestandin(pat))
826 listpats.append(makestandin(pat))
824
827
825 copiedfiles = []
828 copiedfiles = []
826
829
827 def overridecopyfile(orig, src, dest, *args, **kwargs):
830 def overridecopyfile(orig, src, dest, *args, **kwargs):
828 if lfutil.shortname in src and dest.startswith(
831 if lfutil.shortname in src and dest.startswith(
829 repo.wjoin(lfutil.shortname)
832 repo.wjoin(lfutil.shortname)
830 ):
833 ):
831 destlfile = dest.replace(lfutil.shortname, b'')
834 destlfile = dest.replace(lfutil.shortname, b'')
832 if not opts[b'force'] and os.path.exists(destlfile):
835 if not opts[b'force'] and os.path.exists(destlfile):
833 raise IOError(
836 raise IOError(
834 b'', _(b'destination largefile already exists')
837 b'', _(b'destination largefile already exists')
835 )
838 )
836 copiedfiles.append((src, dest))
839 copiedfiles.append((src, dest))
837 orig(src, dest, *args, **kwargs)
840 orig(src, dest, *args, **kwargs)
838
841
839 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
842 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
840 with extensions.wrappedfunction(scmutil, b'match', overridematch):
843 with extensions.wrappedfunction(scmutil, b'match', overridematch):
841 result += orig(ui, repo, listpats, opts, rename)
844 result += orig(ui, repo, listpats, opts, rename)
842
845
843 lfdirstate = lfutil.openlfdirstate(ui, repo)
846 lfdirstate = lfutil.openlfdirstate(ui, repo)
844 for (src, dest) in copiedfiles:
847 for (src, dest) in copiedfiles:
845 if lfutil.shortname in src and dest.startswith(
848 if lfutil.shortname in src and dest.startswith(
846 repo.wjoin(lfutil.shortname)
849 repo.wjoin(lfutil.shortname)
847 ):
850 ):
848 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
851 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
849 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
852 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
850 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
853 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
851 if not os.path.isdir(destlfiledir):
854 if not os.path.isdir(destlfiledir):
852 os.makedirs(destlfiledir)
855 os.makedirs(destlfiledir)
853 if rename:
856 if rename:
854 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
857 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
855
858
856 # The file is gone, but this deletes any empty parent
859 # The file is gone, but this deletes any empty parent
857 # directories as a side-effect.
860 # directories as a side-effect.
858 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
861 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
859 lfdirstate.remove(srclfile)
862 lfdirstate.remove(srclfile)
860 else:
863 else:
861 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
864 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
862
865
863 lfdirstate.add(destlfile)
866 lfdirstate.add(destlfile)
864 lfdirstate.write()
867 lfdirstate.write()
865 except error.Abort as e:
868 except error.Abort as e:
866 if e.message != _(b'no files to copy'):
869 if e.message != _(b'no files to copy'):
867 raise e
870 raise e
868 else:
871 else:
869 nolfiles = True
872 nolfiles = True
870 finally:
873 finally:
871 wlock.release()
874 wlock.release()
872
875
873 if nolfiles and nonormalfiles:
876 if nolfiles and nonormalfiles:
874 raise error.Abort(_(b'no files to copy'))
877 raise error.Abort(_(b'no files to copy'))
875
878
876 return result
879 return result
877
880
878
881
879 # When the user calls revert, we have to be careful to not revert any
882 # When the user calls revert, we have to be careful to not revert any
880 # changes to other largefiles accidentally. This means we have to keep
883 # changes to other largefiles accidentally. This means we have to keep
881 # track of the largefiles that are being reverted so we only pull down
884 # track of the largefiles that are being reverted so we only pull down
882 # the necessary largefiles.
885 # the necessary largefiles.
883 #
886 #
884 # Standins are only updated (to match the hash of largefiles) before
887 # Standins are only updated (to match the hash of largefiles) before
885 # commits. Update the standins then run the original revert, changing
888 # commits. Update the standins then run the original revert, changing
886 # the matcher to hit standins instead of largefiles. Based on the
889 # the matcher to hit standins instead of largefiles. Based on the
887 # resulting standins update the largefiles.
890 # resulting standins update the largefiles.
888 @eh.wrapfunction(cmdutil, b'revert')
891 @eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate[f] == b'r':
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )


# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    revsprepull = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append(b'pulled()')
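    # Only fetch largefile contents when the pull actually brought in new
    # changesets; the 'pulled()' revset below resolves against the
    # repo.firstpulled marker that is set for the duration of the loop.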
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result


@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        opargs = kwargs.setdefault('opargs', {})
        opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)


@eh.wrapfunction(exchange, b'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Override pushoperation constructor and store lfrevs parameter"""
    lfrevs = kwargs.pop('lfrevs', None)
    pushop = orig(*args, **kwargs)
    pushop.lfrevs = lfrevs
    return pushop


@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that have just been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull --lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull --lfrev "head(pulled()) and not closed()"

    """

    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])


@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % d
        )

    return orig(ui, source, dest, **opts)


@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get(b'all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo)

            if missing != 0:
                return None

    return result


@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()


@eh.extsetup
def overriderebase(ui):
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        pass
    else:

        def _dorebase(orig, *args, **kwargs):
            kwargs['inmemory'] = False
            return orig(*args, **kwargs)

        extensions.wrapfunction(rebase, b'_dorebase', _dorebase)


@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    with lfstatus(repo.unfiltered()):
        return orig(ui, repo.unfiltered(), dest, **opts)


@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    with lfstatus(web.repo):
        return orig(web)


@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

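    # Walk the manifest: standins are replaced by the real largefile
    # contents, read from the local path resolved via lfutil.findfile(),
    # so the archive never contains standin files.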
    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive. That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()


@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function. That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)


# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        s = repo.status()
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_(b'uncommitted changes'))


@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)


@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
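    # The original forget() above only saw normal files; now handle the
    # largefiles themselves by forgetting their standins.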
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == b'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot


def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pair of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]:  # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)


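# Hook for 'hg outgoing --large': lists the largefiles that would need
# uploading, with the individual hashes shown in --debug mode.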
def outgoinghook(ui, repo, other, opts, missing):
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}

            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b'    %s\n' % lfhash)

        else:
            toupload = set()

            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)

            def showhashes(fn):
                pass

        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_(b'largefiles: no files to upload\n'))
        else:
            ui.status(
                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
            )
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + b'\n')
                showhashes(file)
            ui.status(b'\n')


@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    # Nothing to do here other than add the extra help option; the hook above
    # processes it.
    return orig(*args, **kwargs)


def summaryremotehook(ui, repo, opts, changes):
    largeopt = opts.get(b'large', False)
    if changes is None:
        if largeopt:
            return (False, True)  # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(
                _(b'largefiles: %d entities for %d files to upload\n')
                % (len(lfhashes), len(toupload))
            )


@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)


@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(
        matchmod.always(),
        subrepos=[],
        ignored=False,
        clean=False,
        unknown=False,
    )

    # Call into the normal remove code, but leave the removal of the standin
    # to the original addremove. Monkey patching here makes sure we don't
    # remove the standin in the largefiles code, preventing a very confused
    # state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, uipathfn, opts)


# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge', extension=b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a workaround we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
        ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus


@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    with repo.wlock():
        before = repo.dirstate.parents()
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == b'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

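        # Re-sync the largefile dirstate with the rolled-back parents and
        # drop any entry whose largefile is no longer tracked.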
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result


@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result


@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        if not f in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

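    # Write out either the plain file data or, for a standin, the real
    # largefile contents, fetching them into the user cache on demand.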
    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err


@eh.wrapfunction(merge, b'_update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=True,
            unknown=False,
        )
        oldclean = set(s.clean)
        pctx = repo[b'.']
        dctx = repo[node]
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(
                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
            )
            if standin in pctx and lfhash == lfutil.readasstandin(
                pctx[standin]
            ):
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.normallookup(lfile)
        lfdirstate.write()

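        # Snapshot the standins so that, after the underlying update runs,
        # only the largefiles whose standins actually changed get refreshed.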
        oldstandins = lfutil.getstandinsstate(repo)
        wc = kwargs.get('wc')
        if wc and wc.isinmemory():
            # largefiles is not a good candidate for in-memory merge (large
            # files, custom dirstate, matcher usage).
            raise error.ProgrammingError(
                b'largefiles is not compatible with in-memory merge'
            )
        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.normal(lfile)
        lfdirstate.write()

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

        return result


@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    result = orig(repo, files, *args, **kwargs)

    filelist = []
    for f in files:
        lf = lfutil.splitstandin(f)
        if lf is not None:
            filelist.append(lf)
    if filelist:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=filelist,
            printmessage=False,
            normallookup=True,
        )

    return result


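# Keep the 'largefiles' requirement across 'hg debugupgraderepo': the core
# upgrade code doesn't know about it, so it has to be carried over here for
# both the preserved and the supported-destination requirement sets.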
@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if b'largefiles' in repo.requirements:
        reqs.add(b'largefiles')
    return reqs


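# 'largefile://<hash>' URLs name a largefile by its hash; openlargefile()
# below resolves them through the largefiles store instead of the normal
# URL opener.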
_lfscheme = b'largefile://'


@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None):
    if url_.startswith(_lfscheme):
        if data:
            msg = b"cannot use data on a 'largefile://' url"
            raise error.ProgrammingError(msg)
        lfid = url_[len(_lfscheme) :]
        return storefactory.getlfile(ui, lfid)
    else:
        return orig(ui, url_, data=data)
@@ -1,548 +1,550 b''
# wrapper.py - methods wrapping core mercurial logic
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib

from mercurial.i18n import _
from mercurial.node import bin, hex, nullid, short
from mercurial.pycompat import (
    getattr,
    setattr,
)

from mercurial import (
    bundle2,
    changegroup,
    cmdutil,
    context,
    error,
    exchange,
    exthelper,
    localrepo,
    pycompat,
    revlog,
    scmutil,
    util,
    vfs as vfsmod,
    wireprotov1server,
)

from mercurial.upgrade_utils import (
    actions as upgrade_actions,
    engine as upgrade_engine,
)

from mercurial.interfaces import repository

from mercurial.utils import (
    storageutil,
    stringutil,
)

from ..largefiles import lfutil

from . import (
    blobstore,
    pointer,
)

eh = exthelper.exthelper()


@eh.wrapfunction(localrepo, b'makefilestorage')
def localrepomakefilestorage(orig, requirements, features, **kwargs):
58 if b'lfs' in requirements:
60 if b'lfs' in requirements:
59 features.add(repository.REPO_FEATURE_LFS)
61 features.add(repository.REPO_FEATURE_LFS)
60
62
61 return orig(requirements=requirements, features=features, **kwargs)
63 return orig(requirements=requirements, features=features, **kwargs)
62
64
63
65
64 @eh.wrapfunction(changegroup, b'allsupportedversions')
66 @eh.wrapfunction(changegroup, b'allsupportedversions')
65 def allsupportedversions(orig, ui):
67 def allsupportedversions(orig, ui):
66 versions = orig(ui)
68 versions = orig(ui)
67 versions.add(b'03')
69 versions.add(b'03')
68 return versions
70 return versions
69
71
70
72
71 @eh.wrapfunction(wireprotov1server, b'_capabilities')
73 @eh.wrapfunction(wireprotov1server, b'_capabilities')
72 def _capabilities(orig, repo, proto):
74 def _capabilities(orig, repo, proto):
73 '''Wrap server command to announce lfs server capability'''
75 '''Wrap server command to announce lfs server capability'''
74 caps = orig(repo, proto)
76 caps = orig(repo, proto)
75 if util.safehasattr(repo.svfs, b'lfslocalblobstore'):
77 if util.safehasattr(repo.svfs, b'lfslocalblobstore'):
76 # Advertise a slightly different capability when lfs is *required*, so
78 # Advertise a slightly different capability when lfs is *required*, so
77 # that the client knows it MUST load the extension. If lfs is not
79 # that the client knows it MUST load the extension. If lfs is not
78 # required on the server, there's no reason to autoload the extension
80 # required on the server, there's no reason to autoload the extension
79 # on the client.
81 # on the client.
80 if b'lfs' in repo.requirements:
82 if b'lfs' in repo.requirements:
81 caps.append(b'lfs-serve')
83 caps.append(b'lfs-serve')
82
84
83 caps.append(b'lfs')
85 caps.append(b'lfs')
84 return caps
86 return caps
85
87
86
88
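
A small sketch of how a client can read this handshake, assuming only the ``remote.capable()`` peer API already used elsewhere in this file:

def serverrequireslfs(remote):
    # 'lfs-serve' means the server *requires* the client to load the lfs
    # extension; a bare 'lfs' capability merely advertises support
    return bool(remote.capable(b'lfs-serve'))
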
87 def bypasscheckhash(self, text):
89 def bypasscheckhash(self, text):
88 return False
90 return False
89
91
90
92
91 def readfromstore(self, text):
93 def readfromstore(self, text):
92 """Read filelog content from local blobstore transform for flagprocessor.
94 """Read filelog content from local blobstore transform for flagprocessor.
93
95
94 Default transform for flagprocessor, returning contents from blobstore.
96 Default transform for flagprocessor, returning contents from blobstore.
95 Returns a 2-tuple (text, validatehash) where validatehash is True as the
97 Returns a 2-tuple (text, validatehash) where validatehash is True as the
96 contents of the blobstore should be checked using checkhash.
98 contents of the blobstore should be checked using checkhash.
97 """
99 """
98 p = pointer.deserialize(text)
100 p = pointer.deserialize(text)
99 oid = p.oid()
101 oid = p.oid()
100 store = self.opener.lfslocalblobstore
102 store = self.opener.lfslocalblobstore
101 if not store.has(oid):
103 if not store.has(oid):
102 p.filename = self.filename
104 p.filename = self.filename
103 self.opener.lfsremoteblobstore.readbatch([p], store)
105 self.opener.lfsremoteblobstore.readbatch([p], store)
104
106
105 # The caller will validate the content
107 # The caller will validate the content
106 text = store.read(oid, verify=False)
108 text = store.read(oid, verify=False)
107
109
108 # pack hg filelog metadata
110 # pack hg filelog metadata
109 hgmeta = {}
111 hgmeta = {}
110 for k in p.keys():
112 for k in p.keys():
111 if k.startswith(b'x-hg-'):
113 if k.startswith(b'x-hg-'):
112 name = k[len(b'x-hg-') :]
114 name = k[len(b'x-hg-') :]
113 hgmeta[name] = p[k]
115 hgmeta[name] = p[k]
114 if hgmeta or text.startswith(b'\1\n'):
116 if hgmeta or text.startswith(b'\1\n'):
115 text = storageutil.packmeta(hgmeta, text)
117 text = storageutil.packmeta(hgmeta, text)
116
118
117 return (text, True, {})
119 return (text, True, {})
118
120
119
121
120 def writetostore(self, text, sidedata):
122 def writetostore(self, text, sidedata):
121 # hg filelog metadata (includes rename, etc)
123 # hg filelog metadata (includes rename, etc)
122 hgmeta, offset = storageutil.parsemeta(text)
124 hgmeta, offset = storageutil.parsemeta(text)
123 if offset and offset > 0:
125 if offset and offset > 0:
124 # lfs blob does not contain hg filelog metadata
126 # lfs blob does not contain hg filelog metadata
125 text = text[offset:]
127 text = text[offset:]
126
128
127 # git-lfs only supports sha256
129 # git-lfs only supports sha256
128 oid = hex(hashlib.sha256(text).digest())
130 oid = hex(hashlib.sha256(text).digest())
129 self.opener.lfslocalblobstore.write(oid, text)
131 self.opener.lfslocalblobstore.write(oid, text)
130
132
131 # replace contents with metadata
133 # replace contents with metadata
132 longoid = b'sha256:%s' % oid
134 longoid = b'sha256:%s' % oid
133 metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))
135 metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))
134
136
135 # by default, we expect the content to be binary. however, LFS could also
137 # by default, we expect the content to be binary. however, LFS could also
136 # be used for non-binary content. add a special entry for non-binary data.
138 # be used for non-binary content. add a special entry for non-binary data.
137 # this will be used by filectx.isbinary().
139 # this will be used by filectx.isbinary().
138 if not stringutil.binary(text):
140 if not stringutil.binary(text):
139 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
141 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
140 metadata[b'x-is-binary'] = b'0'
142 metadata[b'x-is-binary'] = b'0'
141
143
142 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
144 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
143 if hgmeta is not None:
145 if hgmeta is not None:
144 for k, v in pycompat.iteritems(hgmeta):
146 for k, v in pycompat.iteritems(hgmeta):
145 metadata[b'x-hg-%s' % k] = v
147 metadata[b'x-hg-%s' % k] = v
146
148
147 rawtext = metadata.serialize()
149 rawtext = metadata.serialize()
148 return (rawtext, False)
150 return (rawtext, False)
149
151
150
152
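
For reference, a self-contained sketch of the pointer text produced above, using only the stdlib; the real code goes through ``pointer.gitlfspointer`` and adds the ``x-hg-*`` keys:

import hashlib

def sketchpointer(data):
    # git-lfs pointers are key/value text: a version line, the sha256
    # oid of the blob, and its size in bytes
    oid = hashlib.sha256(data).hexdigest().encode('ascii')
    return (
        b'version https://git-lfs.github.com/spec/v1\n'
        + b'oid sha256:' + oid + b'\n'
        + b'size %d\n' % len(data)
    )
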
151 def _islfs(rlog, node=None, rev=None):
153 def _islfs(rlog, node=None, rev=None):
152 if rev is None:
154 if rev is None:
153 if node is None:
155 if node is None:
154 # both None - likely working copy content where node is not ready
156 # both None - likely working copy content where node is not ready
155 return False
157 return False
156 rev = rlog.rev(node)
158 rev = rlog.rev(node)
157 else:
159 else:
158 node = rlog.node(rev)
160 node = rlog.node(rev)
159 if node == nullid:
161 if node == nullid:
160 return False
162 return False
161 flags = rlog.flags(rev)
163 flags = rlog.flags(rev)
162 return bool(flags & revlog.REVIDX_EXTSTORED)
164 return bool(flags & revlog.REVIDX_EXTSTORED)
163
165
164
166
165 # Wrapping may also be applied by remotefilelog
167 # Wrapping may also be applied by remotefilelog
166 def filelogaddrevision(
168 def filelogaddrevision(
167 orig,
169 orig,
168 self,
170 self,
169 text,
171 text,
170 transaction,
172 transaction,
171 link,
173 link,
172 p1,
174 p1,
173 p2,
175 p2,
174 cachedelta=None,
176 cachedelta=None,
175 node=None,
177 node=None,
176 flags=revlog.REVIDX_DEFAULT_FLAGS,
178 flags=revlog.REVIDX_DEFAULT_FLAGS,
177 **kwds
179 **kwds
178 ):
180 ):
179 # The matcher isn't available if reposetup() wasn't called.
181 # The matcher isn't available if reposetup() wasn't called.
180 lfstrack = self._revlog.opener.options.get(b'lfstrack')
182 lfstrack = self._revlog.opener.options.get(b'lfstrack')
181
183
182 if lfstrack:
184 if lfstrack:
183 textlen = len(text)
185 textlen = len(text)
184 # exclude hg rename meta from file size
186 # exclude hg rename meta from file size
185 meta, offset = storageutil.parsemeta(text)
187 meta, offset = storageutil.parsemeta(text)
186 if offset:
188 if offset:
187 textlen -= offset
189 textlen -= offset
188
190
189 if lfstrack(self._revlog.filename, textlen):
191 if lfstrack(self._revlog.filename, textlen):
190 flags |= revlog.REVIDX_EXTSTORED
192 flags |= revlog.REVIDX_EXTSTORED
191
193
192 return orig(
194 return orig(
193 self,
195 self,
194 text,
196 text,
195 transaction,
197 transaction,
196 link,
198 link,
197 p1,
199 p1,
198 p2,
200 p2,
199 cachedelta=cachedelta,
201 cachedelta=cachedelta,
200 node=node,
202 node=node,
201 flags=flags,
203 flags=flags,
202 **kwds
204 **kwds
203 )
205 )
204
206
205
207
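
The ``lfstrack`` callback consulted above is derived from the ``lfs.track`` configuration during reposetup(); a hedged sketch of an equivalent predicate, mirroring the default 10MB size threshold:

def lfstrack(path, size):
    # illustrative only: store any file larger than 10 MiB in lfs,
    # regardless of its path
    return size > 10 * 1024 * 1024
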
206 # Wrapping may also be applied by remotefilelog
208 # Wrapping may also be applied by remotefilelog
207 def filelogrenamed(orig, self, node):
209 def filelogrenamed(orig, self, node):
208 if _islfs(self._revlog, node):
210 if _islfs(self._revlog, node):
209 rawtext = self._revlog.rawdata(node)
211 rawtext = self._revlog.rawdata(node)
210 if not rawtext:
212 if not rawtext:
211 return False
213 return False
212 metadata = pointer.deserialize(rawtext)
214 metadata = pointer.deserialize(rawtext)
213 if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
215 if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
214 return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
216 return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
215 else:
217 else:
216 return False
218 return False
217 return orig(self, node)
219 return orig(self, node)
218
220
219
221
220 # Wrapping may also be applied by remotefilelog
222 # Wrapping may also be applied by remotefilelog
221 def filelogsize(orig, self, rev):
223 def filelogsize(orig, self, rev):
222 if _islfs(self._revlog, rev=rev):
224 if _islfs(self._revlog, rev=rev):
223 # fast path: use lfs metadata to answer size
225 # fast path: use lfs metadata to answer size
224 rawtext = self._revlog.rawdata(rev)
226 rawtext = self._revlog.rawdata(rev)
225 metadata = pointer.deserialize(rawtext)
227 metadata = pointer.deserialize(rawtext)
226 return int(metadata[b'size'])
228 return int(metadata[b'size'])
227 return orig(self, rev)
229 return orig(self, rev)
228
230
229
231
230 @eh.wrapfunction(revlog, b'_verify_revision')
232 @eh.wrapfunction(revlog, b'_verify_revision')
231 def _verify_revision(orig, rl, skipflags, state, node):
233 def _verify_revision(orig, rl, skipflags, state, node):
232 if _islfs(rl, node=node):
234 if _islfs(rl, node=node):
233 rawtext = rl.rawdata(node)
235 rawtext = rl.rawdata(node)
234 metadata = pointer.deserialize(rawtext)
236 metadata = pointer.deserialize(rawtext)
235
237
236 # Don't skip blobs that are stored locally, as local verification is
238 # Don't skip blobs that are stored locally, as local verification is
237 # relatively cheap and there's no other way to verify the raw data in
239 # relatively cheap and there's no other way to verify the raw data in
238 # the revlog.
240 # the revlog.
239 if rl.opener.lfslocalblobstore.has(metadata.oid()):
241 if rl.opener.lfslocalblobstore.has(metadata.oid()):
240 skipflags &= ~revlog.REVIDX_EXTSTORED
242 skipflags &= ~revlog.REVIDX_EXTSTORED
241 elif skipflags & revlog.REVIDX_EXTSTORED:
243 elif skipflags & revlog.REVIDX_EXTSTORED:
242 # The wrapped method will set `skipread`, but there's enough local
244 # The wrapped method will set `skipread`, but there's enough local
243 # info to check renames.
245 # info to check renames.
244 state[b'safe_renamed'].add(node)
246 state[b'safe_renamed'].add(node)
245
247
246 orig(rl, skipflags, state, node)
248 orig(rl, skipflags, state, node)
247
249
248
250
249 @eh.wrapfunction(context.basefilectx, b'cmp')
251 @eh.wrapfunction(context.basefilectx, b'cmp')
250 def filectxcmp(orig, self, fctx):
252 def filectxcmp(orig, self, fctx):
251 """returns True if text is different than fctx"""
253 """returns True if text is different than fctx"""
252 # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs
254 # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs
253 if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
255 if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
254 # fast path: check LFS oid
256 # fast path: check LFS oid
255 p1 = pointer.deserialize(self.rawdata())
257 p1 = pointer.deserialize(self.rawdata())
256 p2 = pointer.deserialize(fctx.rawdata())
258 p2 = pointer.deserialize(fctx.rawdata())
257 return p1.oid() != p2.oid()
259 return p1.oid() != p2.oid()
258 return orig(self, fctx)
260 return orig(self, fctx)
259
261
260
262
261 @eh.wrapfunction(context.basefilectx, b'isbinary')
263 @eh.wrapfunction(context.basefilectx, b'isbinary')
262 def filectxisbinary(orig, self):
264 def filectxisbinary(orig, self):
263 if self.islfs():
265 if self.islfs():
264 # fast path: use lfs metadata to answer isbinary
266 # fast path: use lfs metadata to answer isbinary
265 metadata = pointer.deserialize(self.rawdata())
267 metadata = pointer.deserialize(self.rawdata())
266 # if lfs metadata says nothing, assume it's binary by default
268 # if lfs metadata says nothing, assume it's binary by default
267 return bool(int(metadata.get(b'x-is-binary', 1)))
269 return bool(int(metadata.get(b'x-is-binary', 1)))
268 return orig(self)
270 return orig(self)
269
271
270
272
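
The fallback in ``filectxisbinary`` above can be checked in isolation; plain dicts stand in for deserialized pointer metadata:

# no key -> assume binary; an explicit b'0' marks known-text content
assert bool(int({}.get(b'x-is-binary', 1))) is True
assert bool(int({b'x-is-binary': b'0'}.get(b'x-is-binary', 1))) is False
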
271 def filectxislfs(self):
273 def filectxislfs(self):
272 return _islfs(self.filelog()._revlog, self.filenode())
274 return _islfs(self.filelog()._revlog, self.filenode())
273
275
274
276
275 @eh.wrapfunction(cmdutil, b'_updatecatformatter')
277 @eh.wrapfunction(cmdutil, b'_updatecatformatter')
276 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
278 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
277 orig(fm, ctx, matcher, path, decode)
279 orig(fm, ctx, matcher, path, decode)
278 fm.data(rawdata=ctx[path].rawdata())
280 fm.data(rawdata=ctx[path].rawdata())
279
281
280
282
281 @eh.wrapfunction(scmutil, b'wrapconvertsink')
283 @eh.wrapfunction(scmutil, b'wrapconvertsink')
282 def convertsink(orig, sink):
284 def convertsink(orig, sink):
283 sink = orig(sink)
285 sink = orig(sink)
284 if sink.repotype == b'hg':
286 if sink.repotype == b'hg':
285
287
286 class lfssink(sink.__class__):
288 class lfssink(sink.__class__):
287 def putcommit(
289 def putcommit(
288 self,
290 self,
289 files,
291 files,
290 copies,
292 copies,
291 parents,
293 parents,
292 commit,
294 commit,
293 source,
295 source,
294 revmap,
296 revmap,
295 full,
297 full,
296 cleanp2,
298 cleanp2,
297 ):
299 ):
298 pc = super(lfssink, self).putcommit
300 pc = super(lfssink, self).putcommit
299 node = pc(
301 node = pc(
300 files,
302 files,
301 copies,
303 copies,
302 parents,
304 parents,
303 commit,
305 commit,
304 source,
306 source,
305 revmap,
307 revmap,
306 full,
308 full,
307 cleanp2,
309 cleanp2,
308 )
310 )
309
311
310 if b'lfs' not in self.repo.requirements:
312 if b'lfs' not in self.repo.requirements:
311 ctx = self.repo[node]
313 ctx = self.repo[node]
312
314
313 # The file list may contain removed files, so check for
315 # The file list may contain removed files, so check for
314 # membership before assuming it is in the context.
316 # membership before assuming it is in the context.
315 if any(f in ctx and ctx[f].islfs() for f, n in files):
317 if any(f in ctx and ctx[f].islfs() for f, n in files):
316 self.repo.requirements.add(b'lfs')
318 self.repo.requirements.add(b'lfs')
317 scmutil.writereporequirements(self.repo)
319 scmutil.writereporequirements(self.repo)
318
320
319 return node
321 return node
320
322
321 sink.__class__ = lfssink
323 sink.__class__ = lfssink
322
324
323 return sink
325 return sink
324
326
325
327
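
The ``sink.__class__ = lfssink`` assignment above is the instance-class-swap idiom: subclassing the object's *current* class preserves wrappers other extensions may already have applied. A generic sketch, independent of convert:

def wrapsink(sink):
    # subclass whatever class the instance has right now, so previously
    # applied wrappers stay in the method resolution order
    class wrapped(sink.__class__):
        def putcommit(self, *args, **kwargs):
            # extra behavior would go here
            return super(wrapped, self).putcommit(*args, **kwargs)

    sink.__class__ = wrapped
    return sink
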
326 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
328 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
327 # options and blob stores are passed from othervfs to the new readonlyvfs.
329 # options and blob stores are passed from othervfs to the new readonlyvfs.
328 @eh.wrapfunction(vfsmod.readonlyvfs, b'__init__')
330 @eh.wrapfunction(vfsmod.readonlyvfs, b'__init__')
329 def vfsinit(orig, self, othervfs):
331 def vfsinit(orig, self, othervfs):
330 orig(self, othervfs)
332 orig(self, othervfs)
331 # copy lfs related options
333 # copy lfs related options
332 for k, v in othervfs.options.items():
334 for k, v in othervfs.options.items():
333 if k.startswith(b'lfs'):
335 if k.startswith(b'lfs'):
334 self.options[k] = v
336 self.options[k] = v
335 # also copy lfs blobstores. note: this can run before reposetup, so lfs
337 # also copy lfs blobstores. note: this can run before reposetup, so lfs
336 # blobstore attributes are not always ready at this time.
338 # blobstore attributes are not always ready at this time.
337 for name in [b'lfslocalblobstore', b'lfsremoteblobstore']:
339 for name in [b'lfslocalblobstore', b'lfsremoteblobstore']:
338 if util.safehasattr(othervfs, name):
340 if util.safehasattr(othervfs, name):
339 setattr(self, name, getattr(othervfs, name))
341 setattr(self, name, getattr(othervfs, name))
340
342
341
343
342 def _prefetchfiles(repo, revmatches):
344 def _prefetchfiles(repo, revmatches):
343 """Ensure that required LFS blobs are present, fetching them as a group if
345 """Ensure that required LFS blobs are present, fetching them as a group if
344 needed."""
346 needed."""
345 if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
347 if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
346 return
348 return
347
349
348 pointers = []
350 pointers = []
349 oids = set()
351 oids = set()
350 localstore = repo.svfs.lfslocalblobstore
352 localstore = repo.svfs.lfslocalblobstore
351
353
352 for rev, match in revmatches:
354 for rev, match in revmatches:
353 ctx = repo[rev]
355 ctx = repo[rev]
354 for f in ctx.walk(match):
356 for f in ctx.walk(match):
355 p = pointerfromctx(ctx, f)
357 p = pointerfromctx(ctx, f)
356 if p and p.oid() not in oids and not localstore.has(p.oid()):
358 if p and p.oid() not in oids and not localstore.has(p.oid()):
357 p.filename = f
359 p.filename = f
358 pointers.append(p)
360 pointers.append(p)
359 oids.add(p.oid())
361 oids.add(p.oid())
360
362
361 if pointers:
363 if pointers:
362 # Recalculating the repo store here allows 'paths.default' that is set
364 # Recalculating the repo store here allows 'paths.default' that is set
363 # on the repo by a clone command to be used for the update.
365 # on the repo by a clone command to be used for the update.
364 blobstore.remote(repo).readbatch(pointers, localstore)
366 blobstore.remote(repo).readbatch(pointers, localstore)
365
367
366
368
367 def _canskipupload(repo):
369 def _canskipupload(repo):
368 # Skip if this hasn't been passed to reposetup()
370 # Skip if this hasn't been passed to reposetup()
369 if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
371 if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
370 return True
372 return True
371
373
372 # if remotestore is a null store, upload is a no-op and can be skipped
374 # if remotestore is a null store, upload is a no-op and can be skipped
373 return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
375 return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
374
376
375
377
376 def candownload(repo):
378 def candownload(repo):
377 # Skip if this hasn't been passed to reposetup()
379 # Skip if this hasn't been passed to reposetup()
378 if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
380 if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
379 return False
381 return False
380
382
381 # if remotestore is a null store, downloads will lead to nothing
383 # if remotestore is a null store, downloads will lead to nothing
382 return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
384 return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
383
385
384
386
385 def uploadblobsfromrevs(repo, revs):
387 def uploadblobsfromrevs(repo, revs):
386 """upload lfs blobs introduced by revs
388 """upload lfs blobs introduced by revs
387
389
388 Note: also used by other extensions, e.g. infinitepush. Avoid renaming.
390 Note: also used by other extensions, e.g. infinitepush. Avoid renaming.
389 """
391 """
390 if _canskipupload(repo):
392 if _canskipupload(repo):
391 return
393 return
392 pointers = extractpointers(repo, revs)
394 pointers = extractpointers(repo, revs)
393 uploadblobs(repo, pointers)
395 uploadblobs(repo, pointers)
394
396
395
397
396 def prepush(pushop):
398 def prepush(pushop):
397 """Prepush hook.
399 """Prepush hook.
398
400
399 Read through the revisions to push, looking for filelog entries that can be
401 Read through the revisions to push, looking for filelog entries that can be
400 deserialized into metadata so that we can block the push on their upload to
402 deserialized into metadata so that we can block the push on their upload to
401 the remote blobstore.
403 the remote blobstore.
402 """
404 """
403 return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
405 return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
404
406
405
407
406 @eh.wrapfunction(exchange, b'push')
408 @eh.wrapfunction(exchange, b'push')
407 def push(orig, repo, remote, *args, **kwargs):
409 def push(orig, repo, remote, *args, **kwargs):
408 """bail on push if the extension isn't enabled on remote when needed, and
410 """bail on push if the extension isn't enabled on remote when needed, and
409 update the remote store based on the destination path."""
411 update the remote store based on the destination path."""
410 if b'lfs' in repo.requirements:
412 if b'lfs' in repo.requirements:
411 # If the remote peer is for a local repo, the requirement tests in the
413 # If the remote peer is for a local repo, the requirement tests in the
412 # base class method enforce lfs support. Otherwise, some revisions in
414 # base class method enforce lfs support. Otherwise, some revisions in
413 # this repo use lfs, and the remote repo needs the extension loaded.
415 # this repo use lfs, and the remote repo needs the extension loaded.
414 if not remote.local() and not remote.capable(b'lfs'):
416 if not remote.local() and not remote.capable(b'lfs'):
415 # This is a copy of the message in exchange.push() when requirements
417 # This is a copy of the message in exchange.push() when requirements
416 # are missing between local repos.
418 # are missing between local repos.
417 m = _(b"required features are not supported in the destination: %s")
419 m = _(b"required features are not supported in the destination: %s")
418 raise error.Abort(
420 raise error.Abort(
419 m % b'lfs', hint=_(b'enable the lfs extension on the server')
421 m % b'lfs', hint=_(b'enable the lfs extension on the server')
420 )
422 )
421
423
422 # Repositories where this extension is disabled won't have the field.
424 # Repositories where this extension is disabled won't have the field.
423 # But if there's a requirement, then the extension must be loaded AND
425 # But if there's a requirement, then the extension must be loaded AND
424 # there may be blobs to push.
426 # there may be blobs to push.
425 remotestore = repo.svfs.lfsremoteblobstore
427 remotestore = repo.svfs.lfsremoteblobstore
426 try:
428 try:
427 repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
429 repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
428 return orig(repo, remote, *args, **kwargs)
430 return orig(repo, remote, *args, **kwargs)
429 finally:
431 finally:
430 repo.svfs.lfsremoteblobstore = remotestore
432 repo.svfs.lfsremoteblobstore = remotestore
431 else:
433 else:
432 return orig(repo, remote, *args, **kwargs)
434 return orig(repo, remote, *args, **kwargs)
433
435
434
436
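
The try/finally dance above is a general swap-and-restore pattern; a hedged sketch of the same shape, with the hypothetical ``makestore`` standing in for ``blobstore.remote``:

def withremotestore(repo, url, fn, *args, **kwargs):
    # point the repo at the destination's blobstore only for the duration
    # of 'fn', then restore whatever was configured before
    saved = repo.svfs.lfsremoteblobstore
    repo.svfs.lfsremoteblobstore = makestore(repo, url)  # hypothetical helper
    try:
        return fn(*args, **kwargs)
    finally:
        repo.svfs.lfsremoteblobstore = saved
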
435 # when writing a bundle via "hg bundle" command, upload related LFS blobs
437 # when writing a bundle via "hg bundle" command, upload related LFS blobs
436 @eh.wrapfunction(bundle2, b'writenewbundle')
438 @eh.wrapfunction(bundle2, b'writenewbundle')
437 def writenewbundle(
439 def writenewbundle(
438 orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
440 orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
439 ):
441 ):
440 """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
442 """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
441 uploadblobsfromrevs(repo, outgoing.missing)
443 uploadblobsfromrevs(repo, outgoing.missing)
442 return orig(
444 return orig(
443 ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
445 ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
444 )
446 )
445
447
446
448
447 def extractpointers(repo, revs):
449 def extractpointers(repo, revs):
448 """return a list of lfs pointers added by given revs"""
450 """return a list of lfs pointers added by given revs"""
449 repo.ui.debug(b'lfs: computing set of blobs to upload\n')
451 repo.ui.debug(b'lfs: computing set of blobs to upload\n')
450 pointers = {}
452 pointers = {}
451
453
452 makeprogress = repo.ui.makeprogress
454 makeprogress = repo.ui.makeprogress
453 with makeprogress(
455 with makeprogress(
454 _(b'lfs search'), _(b'changesets'), len(revs)
456 _(b'lfs search'), _(b'changesets'), len(revs)
455 ) as progress:
457 ) as progress:
456 for r in revs:
458 for r in revs:
457 ctx = repo[r]
459 ctx = repo[r]
458 for p in pointersfromctx(ctx).values():
460 for p in pointersfromctx(ctx).values():
459 pointers[p.oid()] = p
461 pointers[p.oid()] = p
460 progress.increment()
462 progress.increment()
461 return sorted(pointers.values(), key=lambda p: p.oid())
463 return sorted(pointers.values(), key=lambda p: p.oid())
462
464
463
465
464 def pointerfromctx(ctx, f, removed=False):
466 def pointerfromctx(ctx, f, removed=False):
465 """return a pointer for the named file from the given changectx, or None if
467 """return a pointer for the named file from the given changectx, or None if
466 the file isn't LFS.
468 the file isn't LFS.
467
469
468 Optionally, the pointer for a file deleted from the context can be returned.
470 Optionally, the pointer for a file deleted from the context can be returned.
469 Since no such pointer is actually stored, and to distinguish from a non-LFS
471 Since no such pointer is actually stored, and to distinguish from a non-LFS
470 file, this pointer is represented by an empty dict.
472 file, this pointer is represented by an empty dict.
471 """
473 """
472 _ctx = ctx
474 _ctx = ctx
473 if f not in ctx:
475 if f not in ctx:
474 if not removed:
476 if not removed:
475 return None
477 return None
476 if f in ctx.p1():
478 if f in ctx.p1():
477 _ctx = ctx.p1()
479 _ctx = ctx.p1()
478 elif f in ctx.p2():
480 elif f in ctx.p2():
479 _ctx = ctx.p2()
481 _ctx = ctx.p2()
480 else:
482 else:
481 return None
483 return None
482 fctx = _ctx[f]
484 fctx = _ctx[f]
483 if not _islfs(fctx.filelog()._revlog, fctx.filenode()):
485 if not _islfs(fctx.filelog()._revlog, fctx.filenode()):
484 return None
486 return None
485 try:
487 try:
486 p = pointer.deserialize(fctx.rawdata())
488 p = pointer.deserialize(fctx.rawdata())
487 if ctx == _ctx:
489 if ctx == _ctx:
488 return p
490 return p
489 return {}
491 return {}
490 except pointer.InvalidPointer as ex:
492 except pointer.InvalidPointer as ex:
491 raise error.Abort(
493 raise error.Abort(
492 _(b'lfs: corrupted pointer (%s@%s): %s\n')
494 _(b'lfs: corrupted pointer (%s@%s): %s\n')
493 % (f, short(_ctx.node()), ex)
495 % (f, short(_ctx.node()), ex)
494 )
496 )
495
497
496
498
497 def pointersfromctx(ctx, removed=False):
499 def pointersfromctx(ctx, removed=False):
498 """return a dict {path: pointer} for given single changectx.
500 """return a dict {path: pointer} for given single changectx.
499
501
500 If ``removed`` == True and the LFS file was removed from ``ctx``, the value
502 If ``removed`` == True and the LFS file was removed from ``ctx``, the value
501 stored for the path is an empty dict.
503 stored for the path is an empty dict.
502 """
504 """
503 result = {}
505 result = {}
504 m = ctx.repo().narrowmatch()
506 m = ctx.repo().narrowmatch()
505
507
506 # TODO: consider manifest.fastread() instead
508 # TODO: consider manifest.fastread() instead
507 for f in ctx.files():
509 for f in ctx.files():
508 if not m(f):
510 if not m(f):
509 continue
511 continue
510 p = pointerfromctx(ctx, f, removed=removed)
512 p = pointerfromctx(ctx, f, removed=removed)
511 if p is not None:
513 if p is not None:
512 result[f] = p
514 result[f] = p
513 return result
515 return result
514
516
515
517
516 def uploadblobs(repo, pointers):
518 def uploadblobs(repo, pointers):
517 """upload given pointers from local blobstore"""
519 """upload given pointers from local blobstore"""
518 if not pointers:
520 if not pointers:
519 return
521 return
520
522
521 remoteblob = repo.svfs.lfsremoteblobstore
523 remoteblob = repo.svfs.lfsremoteblobstore
522 remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
524 remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
523
525
524
526
525 @eh.wrapfunction(upgrade_engine, b'_finishdatamigration')
527 @eh.wrapfunction(upgrade_engine, b'_finishdatamigration')
526 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
528 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
527 orig(ui, srcrepo, dstrepo, requirements)
529 orig(ui, srcrepo, dstrepo, requirements)
528
530
529 # Skip if this hasn't been passed to reposetup()
531 # Skip if this hasn't been passed to reposetup()
530 if util.safehasattr(
532 if util.safehasattr(
531 srcrepo.svfs, b'lfslocalblobstore'
533 srcrepo.svfs, b'lfslocalblobstore'
532 ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'):
534 ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'):
533 srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
535 srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
534 dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs
536 dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs
535
537
536 for dirpath, dirs, files in srclfsvfs.walk():
538 for dirpath, dirs, files in srclfsvfs.walk():
537 for oid in files:
539 for oid in files:
538 ui.write(_(b'copying lfs blob %s\n') % oid)
540 ui.write(_(b'copying lfs blob %s\n') % oid)
539 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
541 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
540
542
541
543
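
Outside Mercurial's vfs layer, the migration above amounts to hardlinking every blob file from one store tree into another; a stdlib-only sketch (POSIX-flavoured):

import os

def linkblobs(src, dst):
    # hardlink each file under 'src' into 'dst', keeping the relative layout
    for dirpath, dirs, files in os.walk(src):
        rel = os.path.relpath(dirpath, src)
        target = os.path.join(dst, rel)
        if not os.path.isdir(target):
            os.makedirs(target)
        for name in files:
            os.link(os.path.join(dirpath, name), os.path.join(target, name))
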
542 @eh.wrapfunction(upgrade, b'preservedrequirements')
544 @eh.wrapfunction(upgrade_actions, b'preservedrequirements')
543 @eh.wrapfunction(upgrade, b'supporteddestrequirements')
545 @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
544 def upgraderequirements(orig, repo):
546 def upgraderequirements(orig, repo):
545 reqs = orig(repo)
547 reqs = orig(repo)
546 if b'lfs' in repo.requirements:
548 if b'lfs' in repo.requirements:
547 reqs.add(b'lfs')
549 reqs.add(b'lfs')
548 return reqs
550 return reqs
This diff has been collapsed as it changes many lines (696 lines changed).
@@ -1,1012 +1,378 b''
1 # upgrade.py - functions for in-place upgrade of Mercurial repository
1 # upgrade.py - functions for in-place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 hg,
13 hg,
14 localrepo,
14 localrepo,
15 pycompat,
15 pycompat,
16 requirements,
17 util,
18 )
16 )
19
17
20 from .upgrade_utils import (
18 from .upgrade_utils import (
19 actions as upgrade_actions,
21 engine as upgrade_engine,
20 engine as upgrade_engine,
22 )
21 )
23
22
24 from .utils import compression
23 allformatvariant = upgrade_actions.allformatvariant
25
26 # list of requirements that request a clone of all revlog if added/removed
27 RECLONES_REQUIREMENTS = {
28 b'generaldelta',
29 requirements.SPARSEREVLOG_REQUIREMENT,
30 }
31
32
33 def requiredsourcerequirements(repo):
34 """Obtain requirements required to be present to upgrade a repo.
35
36 An upgrade will not be allowed if the repository doesn't have the
37 requirements returned by this function.
38 """
39 return {
40 # Introduced in Mercurial 0.9.2.
41 b'revlogv1',
42 # Introduced in Mercurial 0.9.2.
43 b'store',
44 }
45
46
47 def blocksourcerequirements(repo):
48 """Obtain requirements that will prevent an upgrade from occurring.
49
50 An upgrade cannot be performed if the source repository contains any
51 requirement in the returned set.
52 """
53 return {
54 # The upgrade code does not yet support these experimental features.
55 # This is an artificial limitation.
56 requirements.TREEMANIFEST_REQUIREMENT,
57 # This was a precursor to generaldelta and was never enabled by default.
58 # It should (hopefully) not exist in the wild.
59 b'parentdelta',
60 # Upgrade should operate on the actual store, not the shared link.
61 requirements.SHARED_REQUIREMENT,
62 }
63
64
65 def supportremovedrequirements(repo):
66 """Obtain requirements that can be removed during an upgrade.
67
68 If an upgrade were to create a repository that dropped a requirement,
69 the dropped requirement must appear in the returned set for the upgrade
70 to be allowed.
71 """
72 supported = {
73 requirements.SPARSEREVLOG_REQUIREMENT,
74 requirements.SIDEDATA_REQUIREMENT,
75 requirements.COPIESSDC_REQUIREMENT,
76 requirements.NODEMAP_REQUIREMENT,
77 requirements.SHARESAFE_REQUIREMENT,
78 }
79 for name in compression.compengines:
80 engine = compression.compengines[name]
81 if engine.available() and engine.revlogheader():
82 supported.add(b'exp-compression-%s' % name)
83 if engine.name() == b'zstd':
84 supported.add(b'revlog-compression-zstd')
85 return supported
86
87
88 def supporteddestrequirements(repo):
89 """Obtain requirements that upgrade supports in the destination.
90
91 If the result of the upgrade would create requirements not in this set,
92 the upgrade is disallowed.
93
94 Extensions should monkeypatch this to add their custom requirements.
95 """
96 supported = {
97 b'dotencode',
98 b'fncache',
99 b'generaldelta',
100 b'revlogv1',
101 b'store',
102 requirements.SPARSEREVLOG_REQUIREMENT,
103 requirements.SIDEDATA_REQUIREMENT,
104 requirements.COPIESSDC_REQUIREMENT,
105 requirements.NODEMAP_REQUIREMENT,
106 requirements.SHARESAFE_REQUIREMENT,
107 }
108 for name in compression.compengines:
109 engine = compression.compengines[name]
110 if engine.available() and engine.revlogheader():
111 supported.add(b'exp-compression-%s' % name)
112 if engine.name() == b'zstd':
113 supported.add(b'revlog-compression-zstd')
114 return supported
115
116
117 def allowednewrequirements(repo):
118 """Obtain requirements that can be added to a repository during upgrade.
119
120 This is used to disallow proposed requirements from being added when
121 they weren't present before.
122
123 We use a list of allowed requirement additions instead of a list of known
124 bad additions because the whitelist approach is safer and will prevent
125 future, unknown requirements from accidentally being added.
126 """
127 supported = {
128 b'dotencode',
129 b'fncache',
130 b'generaldelta',
131 requirements.SPARSEREVLOG_REQUIREMENT,
132 requirements.SIDEDATA_REQUIREMENT,
133 requirements.COPIESSDC_REQUIREMENT,
134 requirements.NODEMAP_REQUIREMENT,
135 requirements.SHARESAFE_REQUIREMENT,
136 }
137 for name in compression.compengines:
138 engine = compression.compengines[name]
139 if engine.available() and engine.revlogheader():
140 supported.add(b'exp-compression-%s' % name)
141 if engine.name() == b'zstd':
142 supported.add(b'revlog-compression-zstd')
143 return supported
144
145
146 def preservedrequirements(repo):
147 return set()
148
149
150 DEFICIENCY = b'deficiency'
151 OPTIMISATION = b'optimization'
152
153
154 class improvement(object):
155 """Represents an improvement that can be made as part of an upgrade.
156
157 The following attributes are defined on each instance:
158
159 name
160 Machine-readable string uniquely identifying this improvement. It
161 will be mapped to an action later in the upgrade process.
162
163 type
164 Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
165 problem. An optimization is an action (sometimes optional) that
166 can be taken to further improve the state of the repository.
167
168 description
169 Message intended for humans explaining the improvement in more detail,
170 including the implications of it. For ``DEFICIENCY`` types, should be
171 worded in the present tense. For ``OPTIMISATION`` types, should be
172 worded in the future tense.
173
174 upgrademessage
175 Message intended for humans explaining what an upgrade addressing this
176 issue will do. Should be worded in the future tense.
177 """
178
179 def __init__(self, name, type, description, upgrademessage):
180 self.name = name
181 self.type = type
182 self.description = description
183 self.upgrademessage = upgrademessage
184
185 def __eq__(self, other):
186 if not isinstance(other, improvement):
187 # This is what Python tells us to do
188 return NotImplemented
189 return self.name == other.name
190
191 def __ne__(self, other):
192 return not (self == other)
193
194 def __hash__(self):
195 return hash(self.name)
196
197
198 allformatvariant = []
199
200
201 def registerformatvariant(cls):
202 allformatvariant.append(cls)
203 return cls
204
205
206 class formatvariant(improvement):
207 """an improvement subclass dedicated to repository format"""
208
209 type = DEFICIENCY
210 ### The following attributes should be defined for each class:
211
212 # machine-readable string uniquely identifying this improvement. it will be
213 # mapped to an action later in the upgrade process.
214 name = None
215
216 # message intended for humans explaining the improvement in more detail,
217 # including the implications of it. For ``DEFICIENCY`` types, should be worded
218 # in the present tense.
219 description = None
220
221 # message intended for humans explaining what an upgrade addressing this
222 # issue will do. should be worded in the future tense.
223 upgrademessage = None
224
225 # value of current Mercurial default for new repository
226 default = None
227
228 def __init__(self):
229 raise NotImplementedError()
230
231 @staticmethod
232 def fromrepo(repo):
233 """current value of the variant in the repository"""
234 raise NotImplementedError()
235
236 @staticmethod
237 def fromconfig(repo):
238 """current value of the variant in the configuration"""
239 raise NotImplementedError()
240
241
242 class requirementformatvariant(formatvariant):
243 """formatvariant based on a 'requirement' name.
244
245 Many format variants are controlled by a 'requirement'. We define a small
246 subclass to factor out the code.
247 """
248
249 # the requirement that controls this format variant
250 _requirement = None
251
252 @staticmethod
253 def _newreporequirements(ui):
254 return localrepo.newreporequirements(
255 ui, localrepo.defaultcreateopts(ui)
256 )
257
258 @classmethod
259 def fromrepo(cls, repo):
260 assert cls._requirement is not None
261 return cls._requirement in repo.requirements
262
263 @classmethod
264 def fromconfig(cls, repo):
265 assert cls._requirement is not None
266 return cls._requirement in cls._newreporequirements(repo.ui)
267
268
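
Adding a new variant is just subclassing and registering; a hypothetical example tracking an invented ``exp-frobnicate`` requirement (both names are illustrative, and the pattern is unchanged by the move of these helpers to upgrade_utils/actions.py):

@registerformatvariant
class frobnicate(requirementformatvariant):
    name = b'frobnicate'
    _requirement = b'exp-frobnicate'  # hypothetical requirement
    default = False
    description = _(b'repository revlogs are not frobnicated')
    upgrademessage = _(b'revlogs will be frobnicated')
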
269 @registerformatvariant
270 class fncache(requirementformatvariant):
271 name = b'fncache'
272
273 _requirement = b'fncache'
274
275 default = True
276
277 description = _(
278 b'long and reserved filenames may not work correctly; '
279 b'repository performance is sub-optimal'
280 )
281
282 upgrademessage = _(
283 b'repository will be more resilient to storing '
284 b'certain paths and performance of certain '
285 b'operations should be improved'
286 )
287
288
289 @registerformatvariant
290 class dotencode(requirementformatvariant):
291 name = b'dotencode'
292
293 _requirement = b'dotencode'
294
295 default = True
296
297 description = _(
298 b'storage of filenames beginning with a period or '
299 b'space may not work correctly'
300 )
301
302 upgrademessage = _(
303 b'repository will be better able to store files '
304 b'beginning with a space or period'
305 )
306
307
308 @registerformatvariant
309 class generaldelta(requirementformatvariant):
310 name = b'generaldelta'
311
312 _requirement = b'generaldelta'
313
314 default = True
315
316 description = _(
317 b'deltas within internal storage are unable to '
318 b'choose optimal revisions; repository is larger and '
319 b'slower than it could be; interaction with other '
320 b'repositories may require extra network and CPU '
321 b'resources, making "hg push" and "hg pull" slower'
322 )
323
324 upgrademessage = _(
325 b'repository storage will be able to create '
326 b'optimal deltas; new repository data will be '
327 b'smaller and read times should decrease; '
328 b'interacting with other repositories using this '
329 b'storage model should require less network and '
330 b'CPU resources, making "hg push" and "hg pull" '
331 b'faster'
332 )
333
334
335 @registerformatvariant
336 class sharedsafe(requirementformatvariant):
337 name = b'exp-sharesafe'
338 _requirement = requirements.SHARESAFE_REQUIREMENT
339
340 default = False
341
342 description = _(
343 b'old shared repositories do not share source repository '
344 b'requirements and config. This leads to various problems '
345 b'when the source repository format is upgraded or some new '
346 b'extensions are enabled.'
347 )
348
349 upgrademessage = _(
350 b'Upgrades a repository to share-safe format so that future '
351 b'shares of this repository share its requirements and configs.'
352 )
353
354
355 @registerformatvariant
356 class sparserevlog(requirementformatvariant):
357 name = b'sparserevlog'
358
359 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
360
361 default = True
362
363 description = _(
364 b'in order to limit disk reading and memory usage on older '
365 b'versions, the span of a delta chain from its root to its '
366 b'end is limited, whatever the relevant data in this span. '
367 b'This can severely limit the ability of Mercurial to build '
368 b'good chains of deltas, resulting in much more storage space '
369 b'being taken and limited reusability of on-disk deltas during '
370 b'exchange.'
371 )
372
373 upgrademessage = _(
374 b'Revlog supports delta chains with more unused data '
375 b'between payloads. These gaps will be skipped at read '
376 b'time. This allows for better delta chains, better '
377 b'compression and faster exchange with the server.'
378 )
379
380
381 @registerformatvariant
382 class sidedata(requirementformatvariant):
383 name = b'sidedata'
384
385 _requirement = requirements.SIDEDATA_REQUIREMENT
386
387 default = False
388
389 description = _(
390 b'Allows storage of extra data alongside a revision, '
391 b'unlocking various caching options.'
392 )
393
394 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
395
396
397 @registerformatvariant
398 class persistentnodemap(requirementformatvariant):
399 name = b'persistent-nodemap'
400
401 _requirement = requirements.NODEMAP_REQUIREMENT
402
403 default = False
404
405 description = _(
406 b'persist the node -> rev mapping on disk to speed up lookups'
407 )
408
409 upgrademessage = _(b'Speedup revision lookup by node id.')
410
411
412 @registerformatvariant
413 class copiessdc(requirementformatvariant):
414 name = b'copies-sdc'
415
416 _requirement = requirements.COPIESSDC_REQUIREMENT
417
418 default = False
419
420 description = _(b'Stores copies information alongside changesets.')
421
422 upgrademessage = _(
423 b'Allows using a more efficient algorithm to deal with copy tracing.'
424 )
425
426
427 @registerformatvariant
428 class removecldeltachain(formatvariant):
429 name = b'plain-cl-delta'
430
431 default = True
432
433 description = _(
434 b'changelog storage is using deltas instead of '
435 b'raw entries; changelog reading and any '
436 b'operation relying on changelog data are slower '
437 b'than they could be'
438 )
439
440 upgrademessage = _(
441 b'changelog storage will be reformatted to '
442 b'store raw entries; changelog reading will be '
443 b'faster; changelog size may be reduced'
444 )
445
446 @staticmethod
447 def fromrepo(repo):
448 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
449 # changelogs with deltas.
450 cl = repo.changelog
451 chainbase = cl.chainbase
452 return all(rev == chainbase(rev) for rev in cl)
453
454 @staticmethod
455 def fromconfig(repo):
456 return True
457
458
459 @registerformatvariant
460 class compressionengine(formatvariant):
461 name = b'compression'
462 default = b'zlib'
463
464 description = _(
465 b'Compression algorithm used to compress data. '
466 b'Some engines are faster than others.'
467 )
468
469 upgrademessage = _(
470 b'revlog content will be recompressed with the new algorithm.'
471 )
472
473 @classmethod
474 def fromrepo(cls, repo):
475 # we allow multiple compression engine requirements to co-exist because
476 # strictly speaking, revlog seems to support mixed compression styles.
477 #
478 # The compression used for new entries will be "the last one"
479 compression = b'zlib'
480 for req in repo.requirements:
481 prefix = req.startswith
482 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
483 compression = req.split(b'-', 2)[2]
484 return compression
485
486 @classmethod
487 def fromconfig(cls, repo):
488 compengines = repo.ui.configlist(b'format', b'revlog-compression')
489 # return the first valid value as the selection code would do
490 for comp in compengines:
491 if comp in util.compengines:
492 return comp
493
494 # no valid compression found; let's display them all for clarity
495 return b','.join(compengines)
496
497
498 @registerformatvariant
499 class compressionlevel(formatvariant):
500 name = b'compression-level'
501 default = b'default'
502
503 description = _(b'compression level')
504
505 upgrademessage = _(b'revlog content will be recompressed')
506
507 @classmethod
508 def fromrepo(cls, repo):
509 comp = compressionengine.fromrepo(repo)
510 level = None
511 if comp == b'zlib':
512 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
513 elif comp == b'zstd':
514 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
515 if level is None:
516 return b'default'
517 return bytes(level)
518
519 @classmethod
520 def fromconfig(cls, repo):
521 comp = compressionengine.fromconfig(repo)
522 level = None
523 if comp == b'zlib':
524 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
525 elif comp == b'zstd':
526 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
527 if level is None:
528 return b'default'
529 return bytes(level)
530
531
532 def finddeficiencies(repo):
533 """returns a list of deficiencies that the repo suffer from"""
534 deficiencies = []
535
536 # We could detect lack of revlogv1 and store here, but they were added
537 # in 0.9.2 and we don't support upgrading repos without these
538 # requirements, so let's not bother.
539
540 for fv in allformatvariant:
541 if not fv.fromrepo(repo):
542 deficiencies.append(fv)
543
544 return deficiencies
545
546
24
547 # search without '-' to support older form on newer client.
25 # search without '-' to support older form on newer client.
548 #
26 #
549 # We don't enforce backward compatibility for debug command so this
27 # We don't enforce backward compatibility for debug command so this
550 # might eventually be dropped. However, having to use two different
28 # might eventually be dropped. However, having to use two different
551 # forms in scripts when comparing results is annoying enough to add
29 # forms in scripts when comparing results is annoying enough to add
552 # backward compatibility for a while.
30 # backward compatibility for a while.
553 legacy_opts_map = {
31 legacy_opts_map = {
554 b'redeltaparent': b're-delta-parent',
32 b'redeltaparent': b're-delta-parent',
555 b'redeltamultibase': b're-delta-multibase',
33 b'redeltamultibase': b're-delta-multibase',
556 b'redeltaall': b're-delta-all',
34 b'redeltaall': b're-delta-all',
557 b'redeltafulladd': b're-delta-fulladd',
35 b'redeltafulladd': b're-delta-fulladd',
558 }
36 }
559
37
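
A quick worked example of the normalization ``upgraderepo()`` performs with this map: legacy spellings are canonicalized and unknown names pass through unchanged.

optimize = {b'redeltaall', b're-delta-parent', b're-delta-fulladd'}
optimize = {legacy_opts_map.get(o, o) for o in optimize}
# -> {b're-delta-all', b're-delta-parent', b're-delta-fulladd'}
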
560 ALL_OPTIMISATIONS = []
561
562
563 def register_optimization(obj):
564 ALL_OPTIMISATIONS.append(obj)
565 return obj
566
567
568 register_optimization(
569 improvement(
570 name=b're-delta-parent',
571 type=OPTIMISATION,
572 description=_(
573 b'deltas within internal storage will be recalculated to '
574 b'choose an optimal base revision where this was not '
575 b'already done; the size of the repository may shrink and '
576 b'various operations may become faster; the first time '
577 b'this optimization is performed could slow down upgrade '
578 b'execution considerably; subsequent invocations should '
579 b'not run noticeably slower'
580 ),
581 upgrademessage=_(
582 b'deltas within internal storage will choose a new '
583 b'base revision if needed'
584 ),
585 )
586 )
587
588 register_optimization(
589 improvement(
590 name=b're-delta-multibase',
591 type=OPTIMISATION,
592 description=_(
593 b'deltas within internal storage will be recalculated '
594 b'against multiple base revisions and the smallest '
595 b'difference will be used; the size of the repository may '
596 b'shrink significantly when there are many merges; this '
597 b'optimization will slow down execution in proportion to '
598 b'the number of merges in the repository and the number '
599 b'of files in the repository; this slowdown should not '
600 b'be significant unless there are tens of thousands of '
601 b'files and thousands of merges'
602 ),
603 upgrademessage=_(
604 b'deltas within internal storage will choose an '
605 b'optimal delta by computing deltas against multiple '
606 b'parents; may slow down execution time '
607 b'significantly'
608 ),
609 )
610 )
611
612 register_optimization(
613 improvement(
614 name=b're-delta-all',
615 type=OPTIMISATION,
616 description=_(
617 b'deltas within internal storage will always be '
618 b'recalculated without reusing prior deltas; this will '
619 b'likely make execution run several times slower; this '
620 b'optimization is typically not needed'
621 ),
622 upgrademessage=_(
623 b'deltas within internal storage will be fully '
624 b'recomputed; this will likely drastically slow down '
625 b'execution time'
626 ),
627 )
628 )
629
630 register_optimization(
631 improvement(
632 name=b're-delta-fulladd',
633 type=OPTIMISATION,
634 description=_(
635 b'every revision will be re-added as if it was new '
636 b'content. It will go through the full storage '
637 b'mechanism giving extensions a chance to process it '
638 b'(e.g. lfs). This is similar to "re-delta-all" but even '
639 b'slower since more logic is involved.'
640 ),
641 upgrademessage=_(
642 b'each revision will be added as new content to the '
643 b'internal storage; this will likely drastically slow '
644 b'down execution time, but some extensions might need '
645 b'it'
646 ),
647 )
648 )
649
650
651 def findoptimizations(repo):
652 """Determine optimisation that could be used during upgrade"""
653 # These are unconditionally added. There is logic later that figures out
654 # which ones to apply.
655 return list(ALL_OPTIMISATIONS)
656
657
658 def determineactions(repo, deficiencies, sourcereqs, destreqs):
659 """Determine upgrade actions that will be performed.
660
661 Given a list of improvements as returned by ``finddeficiencies`` and
662 ``findoptimizations``, determine the list of upgrade actions that
663 will be performed.
664
665 The role of this function is to filter improvements if needed, apply
666 recommended optimizations from the improvements list that make sense,
667 etc.
668
669 Returns a list of action names.
670 """
671 newactions = []
672
673 for d in deficiencies:
674 name = d._requirement
675
676 # If the action is a requirement that doesn't show up in the
677 # destination requirements, prune the action.
678 if name is not None and name not in destreqs:
679 continue
680
681 newactions.append(d)
682
683 # FUTURE consider adding some optimizations here for certain transitions.
684 # e.g. adding generaldelta could schedule parent redeltas.
685
686 return newactions
687
688
38
689 def upgraderepo(
39 def upgraderepo(
690 ui,
40 ui,
691 repo,
41 repo,
692 run=False,
42 run=False,
693 optimize=None,
43 optimize=None,
694 backup=True,
44 backup=True,
695 manifest=None,
45 manifest=None,
696 changelog=None,
46 changelog=None,
697 filelogs=None,
47 filelogs=None,
698 ):
48 ):
699 """Upgrade a repository in place."""
49 """Upgrade a repository in place."""
700 if optimize is None:
50 if optimize is None:
701 optimize = []
51 optimize = []
702 optimize = {legacy_opts_map.get(o, o) for o in optimize}
52 optimize = {legacy_opts_map.get(o, o) for o in optimize}
703 repo = repo.unfiltered()
53 repo = repo.unfiltered()
704
54
705 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
55 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
706 specentries = (
56 specentries = (
707 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
57 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
708 (upgrade_engine.UPGRADE_MANIFEST, manifest),
58 (upgrade_engine.UPGRADE_MANIFEST, manifest),
709 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
59 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
710 )
60 )
711 specified = [(y, x) for (y, x) in specentries if x is not None]
61 specified = [(y, x) for (y, x) in specentries if x is not None]
712 if specified:
62 if specified:
713 # we have some limitations on which revlogs can be recloned
63 # we have some limitations on which revlogs can be recloned
714 if any(x for y, x in specified):
64 if any(x for y, x in specified):
715 revlogs = set()
65 revlogs = set()
716 for upgrade, enabled in specified:
66 for upgrade, enabled in specified:
717 if enabled:
67 if enabled:
718 revlogs.add(upgrade)
68 revlogs.add(upgrade)
719 else:
69 else:
720 # none are enabled
70 # none are enabled
721 for upgrade, __ in specified:
71 for upgrade, __ in specified:
722 revlogs.discard(upgrade)
72 revlogs.discard(upgrade)
723
73
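
A worked example of the selection above, assuming ``UPGRADE_ALL_REVLOGS`` holds the changelog, manifest and filelog targets: with only ``--no-changelog`` given, no entry is truthy, so everything is kept except the explicitly disabled target.

specified = [(upgrade_engine.UPGRADE_CHANGELOG, False)]
revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
# no enabled entry -> start from the full set and discard disabled targets
for which, __ in specified:
    revlogs.discard(which)
# revlogs now selects the manifest and filelogs only
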
724 # Ensure the repository can be upgraded.
74 # Ensure the repository can be upgraded.
725 missingreqs = requiredsourcerequirements(repo) - repo.requirements
75 missingreqs = (
76 upgrade_actions.requiredsourcerequirements(repo) - repo.requirements
77 )
726 if missingreqs:
78 if missingreqs:
727 raise error.Abort(
79 raise error.Abort(
728 _(b'cannot upgrade repository; requirement missing: %s')
80 _(b'cannot upgrade repository; requirement missing: %s')
729 % _(b', ').join(sorted(missingreqs))
81 % _(b', ').join(sorted(missingreqs))
730 )
82 )
731
83
732 blockedreqs = blocksourcerequirements(repo) & repo.requirements
84 blockedreqs = (
85 upgrade_actions.blocksourcerequirements(repo) & repo.requirements
86 )
733 if blockedreqs:
87 if blockedreqs:
734 raise error.Abort(
88 raise error.Abort(
735 _(
89 _(
736 b'cannot upgrade repository; unsupported source '
90 b'cannot upgrade repository; unsupported source '
737 b'requirement: %s'
91 b'requirement: %s'
738 )
92 )
739 % _(b', ').join(sorted(blockedreqs))
93 % _(b', ').join(sorted(blockedreqs))
740 )
94 )
741
95
742 # FUTURE there is potentially a need to control the wanted requirements via
96 # FUTURE there is potentially a need to control the wanted requirements via
743 # command arguments or via an extension hook point.
97 # command arguments or via an extension hook point.
744 newreqs = localrepo.newreporequirements(
98 newreqs = localrepo.newreporequirements(
745 repo.ui, localrepo.defaultcreateopts(repo.ui)
99 repo.ui, localrepo.defaultcreateopts(repo.ui)
746 )
100 )
747 newreqs.update(preservedrequirements(repo))
101 newreqs.update(upgrade_actions.preservedrequirements(repo))
748
102
749 noremovereqs = (
103 noremovereqs = (
750 repo.requirements - newreqs - supportremovedrequirements(repo)
104 repo.requirements
105 - newreqs
106 - upgrade_actions.supportremovedrequirements(repo)
751 )
107 )
752 if noremovereqs:
108 if noremovereqs:
753 raise error.Abort(
109 raise error.Abort(
754 _(
110 _(
755 b'cannot upgrade repository; requirement would be '
111 b'cannot upgrade repository; requirement would be '
756 b'removed: %s'
112 b'removed: %s'
757 )
113 )
758 % _(b', ').join(sorted(noremovereqs))
114 % _(b', ').join(sorted(noremovereqs))
759 )
115 )
760
116
761 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
117 noaddreqs = (
118 newreqs
119 - repo.requirements
120 - upgrade_actions.allowednewrequirements(repo)
121 )
762 if noaddreqs:
122 if noaddreqs:
763 raise error.Abort(
123 raise error.Abort(
764 _(
124 _(
765 b'cannot upgrade repository; do not support adding '
125 b'cannot upgrade repository; do not support adding '
766 b'requirement: %s'
126 b'requirement: %s'
767 )
127 )
768 % _(b', ').join(sorted(noaddreqs))
128 % _(b', ').join(sorted(noaddreqs))
769 )
129 )
770
130
771 unsupportedreqs = newreqs - supporteddestrequirements(repo)
131 unsupportedreqs = newreqs - upgrade_actions.supporteddestrequirements(repo)
772 if unsupportedreqs:
132 if unsupportedreqs:
773 raise error.Abort(
133 raise error.Abort(
774 _(
134 _(
775 b'cannot upgrade repository; do not support '
135 b'cannot upgrade repository; do not support '
776 b'destination requirement: %s'
136 b'destination requirement: %s'
777 )
137 )
778 % _(b', ').join(sorted(unsupportedreqs))
138 % _(b', ').join(sorted(unsupportedreqs))
779 )
139 )
780
140
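These pre-flight aborts are all plain set subtractions and intersections over requirement names. A minimal sketch of the core of the cascade, with invented requirement names and simple sets standing in for the upgrade_actions helpers:

def check_requirements(current, new, removable, addable, blocked):
    # removable ~ supportremovedrequirements(), addable ~
    # allowednewrequirements(), blocked ~ blocksourcerequirements()
    if blocked & current:
        raise ValueError(
            'unsupported source requirement: %s'
            % ', '.join(sorted(blocked & current))
        )
    noremove = current - new - removable
    if noremove:
        raise ValueError(
            'requirement would be removed: %s' % ', '.join(sorted(noremove))
        )
    noadd = new - current - addable
    if noadd:
        raise ValueError(
            'do not support adding requirement: %s' % ', '.join(sorted(noadd))
        )

# passes: adding 'generaldelta' is explicitly allowed, nothing is dropped
check_requirements(
    current={'revlogv1', 'store'},
    new={'revlogv1', 'store', 'generaldelta'},
    removable=set(),
    addable={'generaldelta'},
    blocked={'parentdelta'},
)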
781 # Find and validate all improvements that can be made.
141 # Find and validate all improvements that can be made.
782 alloptimizations = findoptimizations(repo)
142 alloptimizations = upgrade_actions.findoptimizations(repo)
783
143
784 # Apply and validate arguments.
144 # Apply and validate arguments.
785 optimizations = []
145 optimizations = []
786 for o in alloptimizations:
146 for o in alloptimizations:
787 if o.name in optimize:
147 if o.name in optimize:
788 optimizations.append(o)
148 optimizations.append(o)
789 optimize.discard(o.name)
149 optimize.discard(o.name)
790
150
791 if optimize: # anything left is unknown
151 if optimize: # anything left is unknown
792 raise error.Abort(
152 raise error.Abort(
793 _(b'unknown optimization action requested: %s')
153 _(b'unknown optimization action requested: %s')
794 % b', '.join(sorted(optimize)),
154 % b', '.join(sorted(optimize)),
795 hint=_(b'run without arguments to see valid optimizations'),
155 hint=_(b'run without arguments to see valid optimizations'),
796 )
156 )
797
157
798 deficiencies = finddeficiencies(repo)
158 deficiencies = upgrade_actions.finddeficiencies(repo)
799 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
159 actions = upgrade_actions.determineactions(
160 repo, deficiencies, repo.requirements, newreqs
161 )
800 actions.extend(
162 actions.extend(
801 o
163 o
802 for o in sorted(optimizations)
164 for o in sorted(optimizations)
803 # determineactions could have added optimisations
165 # determineactions could have added optimisations
804 if o not in actions
166 if o not in actions
805 )
167 )
806
168
807 removedreqs = repo.requirements - newreqs
169 removedreqs = repo.requirements - newreqs
808 addedreqs = newreqs - repo.requirements
170 addedreqs = newreqs - repo.requirements
809
171
810 if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
172 if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
811 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
173 incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
174 removedreqs | addedreqs
175 )
812 if incompatible:
176 if incompatible:
813 msg = _(
177 msg = _(
814 b'ignoring revlogs selection flags, format requirements '
178 b'ignoring revlogs selection flags, format requirements '
815 b'change: %s\n'
179 b'change: %s\n'
816 )
180 )
817 ui.warn(msg % b', '.join(sorted(incompatible)))
181 ui.warn(msg % b', '.join(sorted(incompatible)))
818 revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
182 revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
819
183
820 def write_labeled(l, label):
184 def write_labeled(l, label):
821 first = True
185 first = True
822 for r in sorted(l):
186 for r in sorted(l):
823 if not first:
187 if not first:
824 ui.write(b', ')
188 ui.write(b', ')
825 ui.write(r, label=label)
189 ui.write(r, label=label)
826 first = False
190 first = False
827
191
828 def printrequirements():
192 def printrequirements():
829 ui.write(_(b'requirements\n'))
193 ui.write(_(b'requirements\n'))
830 ui.write(_(b' preserved: '))
194 ui.write(_(b' preserved: '))
831 write_labeled(
195 write_labeled(
832 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
196 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
833 )
197 )
834 ui.write((b'\n'))
198 ui.write((b'\n'))
835 removed = repo.requirements - newreqs
199 removed = repo.requirements - newreqs
836 if removed:
200 if removed:
837 ui.write(_(b' removed: '))
201 ui.write(_(b' removed: '))
838 write_labeled(removed, "upgrade-repo.requirement.removed")
202 write_labeled(removed, "upgrade-repo.requirement.removed")
839 ui.write((b'\n'))
203 ui.write((b'\n'))
840 added = newreqs - repo.requirements
204 added = newreqs - repo.requirements
841 if added:
205 if added:
842 ui.write(_(b' added: '))
206 ui.write(_(b' added: '))
843 write_labeled(added, "upgrade-repo.requirement.added")
207 write_labeled(added, "upgrade-repo.requirement.added")
844 ui.write((b'\n'))
208 ui.write((b'\n'))
845 ui.write(b'\n')
209 ui.write(b'\n')
846
210
847 def printoptimisations():
211 def printoptimisations():
848 optimisations = [a for a in actions if a.type == OPTIMISATION]
212 optimisations = [
213 a for a in actions if a.type == upgrade_actions.OPTIMISATION
214 ]
849 optimisations.sort(key=lambda a: a.name)
215 optimisations.sort(key=lambda a: a.name)
850 if optimisations:
216 if optimisations:
851 ui.write(_(b'optimisations: '))
217 ui.write(_(b'optimisations: '))
852 write_labeled(
218 write_labeled(
853 [a.name for a in optimisations],
219 [a.name for a in optimisations],
854 "upgrade-repo.optimisation.performed",
220 "upgrade-repo.optimisation.performed",
855 )
221 )
856 ui.write(b'\n\n')
222 ui.write(b'\n\n')
857
223
858 def printupgradeactions():
224 def printupgradeactions():
859 for a in actions:
225 for a in actions:
860 ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
226 ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
861
227
862 def print_affected_revlogs():
228 def print_affected_revlogs():
863 if not revlogs:
229 if not revlogs:
864 ui.write((b'no revlogs to process\n'))
230 ui.write((b'no revlogs to process\n'))
865 else:
231 else:
866 ui.write((b'processed revlogs:\n'))
232 ui.write((b'processed revlogs:\n'))
867 for r in sorted(revlogs):
233 for r in sorted(revlogs):
868 ui.write((b' - %s\n' % r))
234 ui.write((b' - %s\n' % r))
869 ui.write((b'\n'))
235 ui.write((b'\n'))
870
236
871 if not run:
237 if not run:
872 fromconfig = []
238 fromconfig = []
873 onlydefault = []
239 onlydefault = []
874
240
875 for d in deficiencies:
241 for d in deficiencies:
876 if d.fromconfig(repo):
242 if d.fromconfig(repo):
877 fromconfig.append(d)
243 fromconfig.append(d)
878 elif d.default:
244 elif d.default:
879 onlydefault.append(d)
245 onlydefault.append(d)
880
246
881 if fromconfig or onlydefault:
247 if fromconfig or onlydefault:
882
248
883 if fromconfig:
249 if fromconfig:
884 ui.status(
250 ui.status(
885 _(
251 _(
886 b'repository lacks features recommended by '
252 b'repository lacks features recommended by '
887 b'current config options:\n\n'
253 b'current config options:\n\n'
888 )
254 )
889 )
255 )
890 for i in fromconfig:
256 for i in fromconfig:
891 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
257 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
892
258
893 if onlydefault:
259 if onlydefault:
894 ui.status(
260 ui.status(
895 _(
261 _(
896 b'repository lacks features used by the default '
262 b'repository lacks features used by the default '
897 b'config options:\n\n'
263 b'config options:\n\n'
898 )
264 )
899 )
265 )
900 for i in onlydefault:
266 for i in onlydefault:
901 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
267 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
902
268
903 ui.status(b'\n')
269 ui.status(b'\n')
904 else:
270 else:
905 ui.status(
271 ui.status(
906 _(
272 _(
907 b'(no feature deficiencies found in existing '
273 b'(no feature deficiencies found in existing '
908 b'repository)\n'
274 b'repository)\n'
909 )
275 )
910 )
276 )
911
277
912 ui.status(
278 ui.status(
913 _(
279 _(
914 b'performing an upgrade with "--run" will make the following '
280 b'performing an upgrade with "--run" will make the following '
915 b'changes:\n\n'
281 b'changes:\n\n'
916 )
282 )
917 )
283 )
918
284
919 printrequirements()
285 printrequirements()
920 printoptimisations()
286 printoptimisations()
921 printupgradeactions()
287 printupgradeactions()
922 print_affected_revlogs()
288 print_affected_revlogs()
923
289
924 unusedoptimize = [i for i in alloptimizations if i not in actions]
290 unusedoptimize = [i for i in alloptimizations if i not in actions]
925
291
926 if unusedoptimize:
292 if unusedoptimize:
927 ui.status(
293 ui.status(
928 _(
294 _(
929 b'additional optimizations are available by specifying '
295 b'additional optimizations are available by specifying '
930 b'"--optimize <name>":\n\n'
296 b'"--optimize <name>":\n\n'
931 )
297 )
932 )
298 )
933 for i in unusedoptimize:
299 for i in unusedoptimize:
934 ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
300 ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
935 return
301 return
936
302
937 # Else we're in the run=true case.
303 # Else we're in the run=true case.
938 ui.write(_(b'upgrade will perform the following actions:\n\n'))
304 ui.write(_(b'upgrade will perform the following actions:\n\n'))
939 printrequirements()
305 printrequirements()
940 printoptimisations()
306 printoptimisations()
941 printupgradeactions()
307 printupgradeactions()
942 print_affected_revlogs()
308 print_affected_revlogs()
943
309
944 upgradeactions = [a.name for a in actions]
310 upgradeactions = [a.name for a in actions]
945
311
946 ui.status(_(b'beginning upgrade...\n'))
312 ui.status(_(b'beginning upgrade...\n'))
947 with repo.wlock(), repo.lock():
313 with repo.wlock(), repo.lock():
948 ui.status(_(b'repository locked and read-only\n'))
314 ui.status(_(b'repository locked and read-only\n'))
949 # Our strategy for upgrading the repository is to create a new,
315 # Our strategy for upgrading the repository is to create a new,
950 # temporary repository, write data to it, then do a swap of the
316 # temporary repository, write data to it, then do a swap of the
951 # data. There are less heavyweight ways to do this, but it is easier
317 # data. There are less heavyweight ways to do this, but it is easier
952 # to create a new repo object than to instantiate all the components
318 # to create a new repo object than to instantiate all the components
953 # (like the store) separately.
319 # (like the store) separately.
954 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
320 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
955 backuppath = None
321 backuppath = None
956 try:
322 try:
957 ui.status(
323 ui.status(
958 _(
324 _(
959 b'creating temporary repository to stage migrated '
325 b'creating temporary repository to stage migrated '
960 b'data: %s\n'
326 b'data: %s\n'
961 )
327 )
962 % tmppath
328 % tmppath
963 )
329 )
964
330
965 # clone ui without using ui.copy because repo.ui is protected
331 # clone ui without using ui.copy because repo.ui is protected
966 repoui = repo.ui.__class__(repo.ui)
332 repoui = repo.ui.__class__(repo.ui)
967 dstrepo = hg.repository(repoui, path=tmppath, create=True)
333 dstrepo = hg.repository(repoui, path=tmppath, create=True)
968
334
969 with dstrepo.wlock(), dstrepo.lock():
335 with dstrepo.wlock(), dstrepo.lock():
970 backuppath = upgrade_engine.upgrade(
336 backuppath = upgrade_engine.upgrade(
971 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
337 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
972 )
338 )
973 if not (backup or backuppath is None):
339 if not (backup or backuppath is None):
974 ui.status(
340 ui.status(
975 _(b'removing old repository content %s\n') % backuppath
341 _(b'removing old repository content %s\n') % backuppath
976 )
342 )
977 repo.vfs.rmtree(backuppath, forcibly=True)
343 repo.vfs.rmtree(backuppath, forcibly=True)
978 backuppath = None
344 backuppath = None
979
345
980 finally:
346 finally:
981 ui.status(_(b'removing temporary repository %s\n') % tmppath)
347 ui.status(_(b'removing temporary repository %s\n') % tmppath)
982 repo.vfs.rmtree(tmppath, forcibly=True)
348 repo.vfs.rmtree(tmppath, forcibly=True)
983
349
984 if backuppath and not ui.quiet:
350 if backuppath and not ui.quiet:
985 ui.warn(
351 ui.warn(
986 _(b'copy of old repository backed up at %s\n') % backuppath
352 _(b'copy of old repository backed up at %s\n') % backuppath
987 )
353 )
988 ui.warn(
354 ui.warn(
989 _(
355 _(
990 b'the old repository will not be deleted; remove '
356 b'the old repository will not be deleted; remove '
991 b'it to free up disk space once the upgraded '
357 b'it to free up disk space once the upgraded '
992 b'repository is verified\n'
358 b'repository is verified\n'
993 )
359 )
994 )
360 )
995
361
996 if sharedsafe.name in addedreqs:
362 if upgrade_actions.sharesafe.name in addedreqs:
997 ui.warn(
363 ui.warn(
998 _(
364 _(
999 b'repository upgraded to share safe mode, existing'
365 b'repository upgraded to share safe mode, existing'
1000 b' shares will still work in old non-safe mode. '
366 b' shares will still work in old non-safe mode. '
1001 b'Re-share existing shares to use them in safe mode.'
367 b'Re-share existing shares to use them in safe mode.'
1002 b' New shares will be created in safe mode.\n'
368 b' New shares will be created in safe mode.\n'
1003 )
369 )
1004 )
370 )
1005 if sharedsafe.name in removedreqs:
371 if upgrade_actions.sharesafe.name in removedreqs:
1006 ui.warn(
372 ui.warn(
1007 _(
373 _(
1008 b'repository downgraded to not use share safe mode, '
374 b'repository downgraded to not use share safe mode, '
1009 b'existing shares will not work and need to'
375 b'existing shares will not work and need to'
1010 b' be reshared.\n'
376 b' be reshared.\n'
1011 )
377 )
1012 )
378 )
@@ -1,1012 +1,666 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from ..i18n import _
11 from . import (
11 from .. import (
12 error,
13 hg,
14 localrepo,
12 localrepo,
15 pycompat,
16 requirements,
13 requirements,
17 util,
14 util,
18 )
15 )
19
16
20 from .upgrade_utils import (
17 from ..utils import compression
21 engine as upgrade_engine,
22 )
23
24 from .utils import compression
25
18
26 # list of requirements that request a clone of all revlogs if added/removed
19 # list of requirements that request a clone of all revlogs if added/removed
27 RECLONES_REQUIREMENTS = {
20 RECLONES_REQUIREMENTS = {
28 b'generaldelta',
21 b'generaldelta',
29 requirements.SPARSEREVLOG_REQUIREMENT,
22 requirements.SPARSEREVLOG_REQUIREMENT,
30 }
23 }
31
24
32
25
33 def requiredsourcerequirements(repo):
26 def requiredsourcerequirements(repo):
34 """Obtain requirements required to be present to upgrade a repo.
27 """Obtain requirements required to be present to upgrade a repo.
35
28
36 An upgrade will not be allowed if the repository doesn't have the
29 An upgrade will not be allowed if the repository doesn't have the
37 requirements returned by this function.
30 requirements returned by this function.
38 """
31 """
39 return {
32 return {
40 # Introduced in Mercurial 0.9.2.
33 # Introduced in Mercurial 0.9.2.
41 b'revlogv1',
34 b'revlogv1',
42 # Introduced in Mercurial 0.9.2.
35 # Introduced in Mercurial 0.9.2.
43 b'store',
36 b'store',
44 }
37 }
45
38
46
39
47 def blocksourcerequirements(repo):
40 def blocksourcerequirements(repo):
48 """Obtain requirements that will prevent an upgrade from occurring.
41 """Obtain requirements that will prevent an upgrade from occurring.
49
42
50 An upgrade cannot be performed if the source repository contains a
43 An upgrade cannot be performed if the source repository contains a
51 requirement in the returned set.
44 requirement in the returned set.
52 """
45 """
53 return {
46 return {
54 # The upgrade code does not yet support these experimental features.
47 # The upgrade code does not yet support these experimental features.
55 # This is an artificial limitation.
48 # This is an artificial limitation.
56 requirements.TREEMANIFEST_REQUIREMENT,
49 requirements.TREEMANIFEST_REQUIREMENT,
57 # This was a precursor to generaldelta and was never enabled by default.
50 # This was a precursor to generaldelta and was never enabled by default.
58 # It should (hopefully) not exist in the wild.
51 # It should (hopefully) not exist in the wild.
59 b'parentdelta',
52 b'parentdelta',
60 # Upgrade should operate on the actual store, not the shared link.
53 # Upgrade should operate on the actual store, not the shared link.
61 requirements.SHARED_REQUIREMENT,
54 requirements.SHARED_REQUIREMENT,
62 }
55 }
63
56
64
57
65 def supportremovedrequirements(repo):
58 def supportremovedrequirements(repo):
66 """Obtain requirements that can be removed during an upgrade.
59 """Obtain requirements that can be removed during an upgrade.
67
60
68 If an upgrade were to create a repository that dropped a requirement,
61 If an upgrade were to create a repository that dropped a requirement,
69 the dropped requirement must appear in the returned set for the upgrade
62 the dropped requirement must appear in the returned set for the upgrade
70 to be allowed.
63 to be allowed.
71 """
64 """
72 supported = {
65 supported = {
73 requirements.SPARSEREVLOG_REQUIREMENT,
66 requirements.SPARSEREVLOG_REQUIREMENT,
74 requirements.SIDEDATA_REQUIREMENT,
67 requirements.SIDEDATA_REQUIREMENT,
75 requirements.COPIESSDC_REQUIREMENT,
68 requirements.COPIESSDC_REQUIREMENT,
76 requirements.NODEMAP_REQUIREMENT,
69 requirements.NODEMAP_REQUIREMENT,
77 requirements.SHARESAFE_REQUIREMENT,
70 requirements.SHARESAFE_REQUIREMENT,
78 }
71 }
79 for name in compression.compengines:
72 for name in compression.compengines:
80 engine = compression.compengines[name]
73 engine = compression.compengines[name]
81 if engine.available() and engine.revlogheader():
74 if engine.available() and engine.revlogheader():
82 supported.add(b'exp-compression-%s' % name)
75 supported.add(b'exp-compression-%s' % name)
83 if engine.name() == b'zstd':
76 if engine.name() == b'zstd':
84 supported.add(b'revlog-compression-zstd')
77 supported.add(b'revlog-compression-zstd')
85 return supported
78 return supported
86
79
87
80
88 def supporteddestrequirements(repo):
81 def supporteddestrequirements(repo):
89 """Obtain requirements that upgrade supports in the destination.
82 """Obtain requirements that upgrade supports in the destination.
90
83
91 If the result of the upgrade would create requirements not in this set,
84 If the result of the upgrade would create requirements not in this set,
92 the upgrade is disallowed.
85 the upgrade is disallowed.
93
86
94 Extensions should monkeypatch this to add their custom requirements.
87 Extensions should monkeypatch this to add their custom requirements.
95 """
88 """
96 supported = {
89 supported = {
97 b'dotencode',
90 b'dotencode',
98 b'fncache',
91 b'fncache',
99 b'generaldelta',
92 b'generaldelta',
100 b'revlogv1',
93 b'revlogv1',
101 b'store',
94 b'store',
102 requirements.SPARSEREVLOG_REQUIREMENT,
95 requirements.SPARSEREVLOG_REQUIREMENT,
103 requirements.SIDEDATA_REQUIREMENT,
96 requirements.SIDEDATA_REQUIREMENT,
104 requirements.COPIESSDC_REQUIREMENT,
97 requirements.COPIESSDC_REQUIREMENT,
105 requirements.NODEMAP_REQUIREMENT,
98 requirements.NODEMAP_REQUIREMENT,
106 requirements.SHARESAFE_REQUIREMENT,
99 requirements.SHARESAFE_REQUIREMENT,
107 }
100 }
108 for name in compression.compengines:
101 for name in compression.compengines:
109 engine = compression.compengines[name]
102 engine = compression.compengines[name]
110 if engine.available() and engine.revlogheader():
103 if engine.available() and engine.revlogheader():
111 supported.add(b'exp-compression-%s' % name)
104 supported.add(b'exp-compression-%s' % name)
112 if engine.name() == b'zstd':
105 if engine.name() == b'zstd':
113 supported.add(b'revlog-compression-zstd')
106 supported.add(b'revlog-compression-zstd')
114 return supported
107 return supported
115
108
116
109
117 def allowednewrequirements(repo):
110 def allowednewrequirements(repo):
118 """Obtain requirements that can be added to a repository during upgrade.
111 """Obtain requirements that can be added to a repository during upgrade.
119
112
120 This is used to disallow proposed requirements from being added when
113 This is used to disallow proposed requirements from being added when
121 they weren't present before.
114 they weren't present before.
122
115
123 We use a list of allowed requirement additions instead of a list of known
116 We use a list of allowed requirement additions instead of a list of known
124 bad additions because the whitelist approach is safer and will prevent
117 bad additions because the whitelist approach is safer and will prevent
125 future, unknown requirements from accidentally being added.
118 future, unknown requirements from accidentally being added.
126 """
119 """
127 supported = {
120 supported = {
128 b'dotencode',
121 b'dotencode',
129 b'fncache',
122 b'fncache',
130 b'generaldelta',
123 b'generaldelta',
131 requirements.SPARSEREVLOG_REQUIREMENT,
124 requirements.SPARSEREVLOG_REQUIREMENT,
132 requirements.SIDEDATA_REQUIREMENT,
125 requirements.SIDEDATA_REQUIREMENT,
133 requirements.COPIESSDC_REQUIREMENT,
126 requirements.COPIESSDC_REQUIREMENT,
134 requirements.NODEMAP_REQUIREMENT,
127 requirements.NODEMAP_REQUIREMENT,
135 requirements.SHARESAFE_REQUIREMENT,
128 requirements.SHARESAFE_REQUIREMENT,
136 }
129 }
137 for name in compression.compengines:
130 for name in compression.compengines:
138 engine = compression.compengines[name]
131 engine = compression.compengines[name]
139 if engine.available() and engine.revlogheader():
132 if engine.available() and engine.revlogheader():
140 supported.add(b'exp-compression-%s' % name)
133 supported.add(b'exp-compression-%s' % name)
141 if engine.name() == b'zstd':
134 if engine.name() == b'zstd':
142 supported.add(b'revlog-compression-zstd')
135 supported.add(b'revlog-compression-zstd')
143 return supported
136 return supported
144
137
145
138
146 def preservedrequirements(repo):
139 def preservedrequirements(repo):
147 return set()
140 return set()
148
141
149
142
150 DEFICIENCY = b'deficiency'
143 DEFICIENCY = b'deficiency'
151 OPTIMISATION = b'optimization'
144 OPTIMISATION = b'optimization'
152
145
153
146
154 class improvement(object):
147 class improvement(object):
155 """Represents an improvement that can be made as part of an upgrade.
148 """Represents an improvement that can be made as part of an upgrade.
156
149
157 The following attributes are defined on each instance:
150 The following attributes are defined on each instance:
158
151
159 name
152 name
160 Machine-readable string uniquely identifying this improvement. It
153 Machine-readable string uniquely identifying this improvement. It
161 will be mapped to an action later in the upgrade process.
154 will be mapped to an action later in the upgrade process.
162
155
163 type
156 type
164 Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
157 Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
165 problem. An optimization is an action (sometimes optional) that
158 problem. An optimization is an action (sometimes optional) that
166 can be taken to further improve the state of the repository.
159 can be taken to further improve the state of the repository.
167
160
168 description
161 description
169 Message intended for humans explaining the improvement in more detail,
162 Message intended for humans explaining the improvement in more detail,
170 including the implications of it. For ``DEFICIENCY`` types, should be
163 including the implications of it. For ``DEFICIENCY`` types, should be
171 worded in the present tense. For ``OPTIMISATION`` types, should be
164 worded in the present tense. For ``OPTIMISATION`` types, should be
172 worded in the future tense.
165 worded in the future tense.
173
166
174 upgrademessage
167 upgrademessage
175 Message intended for humans explaining what an upgrade addressing this
168 Message intended for humans explaining what an upgrade addressing this
176 issue will do. Should be worded in the future tense.
169 issue will do. Should be worded in the future tense.
177 """
170 """
178
171
179 def __init__(self, name, type, description, upgrademessage):
172 def __init__(self, name, type, description, upgrademessage):
180 self.name = name
173 self.name = name
181 self.type = type
174 self.type = type
182 self.description = description
175 self.description = description
183 self.upgrademessage = upgrademessage
176 self.upgrademessage = upgrademessage
184
177
185 def __eq__(self, other):
178 def __eq__(self, other):
186 if not isinstance(other, improvement):
179 if not isinstance(other, improvement):
187 # This is what Python tells us to do
180 # This is what Python tells us to do
188 return NotImplemented
181 return NotImplemented
189 return self.name == other.name
182 return self.name == other.name
190
183
191 def __ne__(self, other):
184 def __ne__(self, other):
192 return not (self == other)
185 return not (self == other)
193
186
194 def __hash__(self):
187 def __hash__(self):
195 return hash(self.name)
188 return hash(self.name)
196
189
197
190
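Because __eq__ and __hash__ are defined on the name alone, two improvement instances with different descriptions still count as the same action for list and set membership, which is what lets callers write `if o not in actions`. A standalone sketch of that identity rule (mirroring the class rather than importing it):

class sketchaction(object):
    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        if not isinstance(other, sketchaction):
            return NotImplemented
        return self.name == other.name

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.name)

a = sketchaction(b're-delta-parent')
b = sketchaction(b're-delta-parent')
assert a == b and len({a, b}) == 1  # identity is the name, nothing else
assert a in [b]  # so membership tests deduplicate actions by name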
198 allformatvariant = []
191 allformatvariant = []
199
192
200
193
201 def registerformatvariant(cls):
194 def registerformatvariant(cls):
202 allformatvariant.append(cls)
195 allformatvariant.append(cls)
203 return cls
196 return cls
204
197
205
198
206 class formatvariant(improvement):
199 class formatvariant(improvement):
207 """an improvement subclass dedicated to repository format"""
200 """an improvement subclass dedicated to repository format"""
208
201
209 type = DEFICIENCY
202 type = DEFICIENCY
210 ### The following attributes should be defined for each class:
203 ### The following attributes should be defined for each class:
211
204
212 # machine-readable string uniquely identifying this improvement. it will be
205 # machine-readable string uniquely identifying this improvement. it will be
213 # mapped to an action later in the upgrade process.
206 # mapped to an action later in the upgrade process.
214 name = None
207 name = None
215
208
216 # message intended for humans explaining the improvement in more detail,
209 # message intended for humans explaining the improvement in more detail,
217 # including the implications of it. For ``DEFICIENCY`` types, should be worded
210 # including the implications of it. For ``DEFICIENCY`` types, should be worded
218 # in the present tense.
211 # in the present tense.
219 description = None
212 description = None
220
213
221 # message intended for humans explaining what an upgrade addressing this
214 # message intended for humans explaining what an upgrade addressing this
222 # issue will do. should be worded in the future tense.
215 # issue will do. should be worded in the future tense.
223 upgrademessage = None
216 upgrademessage = None
224
217
225 # value of current Mercurial default for new repository
218 # value of current Mercurial default for new repository
226 default = None
219 default = None
227
220
228 def __init__(self):
221 def __init__(self):
229 raise NotImplementedError()
222 raise NotImplementedError()
230
223
231 @staticmethod
224 @staticmethod
232 def fromrepo(repo):
225 def fromrepo(repo):
233 """current value of the variant in the repository"""
226 """current value of the variant in the repository"""
234 raise NotImplementedError()
227 raise NotImplementedError()
235
228
236 @staticmethod
229 @staticmethod
237 def fromconfig(repo):
230 def fromconfig(repo):
238 """current value of the variant in the configuration"""
231 """current value of the variant in the configuration"""
239 raise NotImplementedError()
232 raise NotImplementedError()
240
233
241
234
242 class requirementformatvariant(formatvariant):
235 class requirementformatvariant(formatvariant):
243 """formatvariant based on a 'requirement' name.
236 """formatvariant based on a 'requirement' name.
244
237
245 Many format variants are controlled by a 'requirement'. We define a small
238 Many format variants are controlled by a 'requirement'. We define a small
246 subclass to factor out the common code.
239 subclass to factor out the common code.
247 """
240 """
248
241
249 # the requirement that controls this format variant
242 # the requirement that controls this format variant
250 _requirement = None
243 _requirement = None
251
244
252 @staticmethod
245 @staticmethod
253 def _newreporequirements(ui):
246 def _newreporequirements(ui):
254 return localrepo.newreporequirements(
247 return localrepo.newreporequirements(
255 ui, localrepo.defaultcreateopts(ui)
248 ui, localrepo.defaultcreateopts(ui)
256 )
249 )
257
250
258 @classmethod
251 @classmethod
259 def fromrepo(cls, repo):
252 def fromrepo(cls, repo):
260 assert cls._requirement is not None
253 assert cls._requirement is not None
261 return cls._requirement in repo.requirements
254 return cls._requirement in repo.requirements
262
255
263 @classmethod
256 @classmethod
264 def fromconfig(cls, repo):
257 def fromconfig(cls, repo):
265 assert cls._requirement is not None
258 assert cls._requirement is not None
266 return cls._requirement in cls._newreporequirements(repo.ui)
259 return cls._requirement in cls._newreporequirements(repo.ui)
267
260
268
261
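With this helper in place, adding a requirement-backed variant is purely declarative, as the registrations that follow show. A hedged sketch of a hypothetical variant written in this module's idiom (the names example-variant and b'exp-example' are invented for illustration and do not exist in Mercurial):

@registerformatvariant
class examplevariant(requirementformatvariant):
    name = b'example-variant'  # hypothetical variant name
    _requirement = b'exp-example'  # hypothetical requirement string

    default = False

    description = _(b'repository lacks the (made-up) example feature')

    upgrademessage = _(b'repository will gain the (made-up) example feature')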
269 @registerformatvariant
262 @registerformatvariant
270 class fncache(requirementformatvariant):
263 class fncache(requirementformatvariant):
271 name = b'fncache'
264 name = b'fncache'
272
265
273 _requirement = b'fncache'
266 _requirement = b'fncache'
274
267
275 default = True
268 default = True
276
269
277 description = _(
270 description = _(
278 b'long and reserved filenames may not work correctly; '
271 b'long and reserved filenames may not work correctly; '
279 b'repository performance is sub-optimal'
272 b'repository performance is sub-optimal'
280 )
273 )
281
274
282 upgrademessage = _(
275 upgrademessage = _(
283 b'repository will be more resilient to storing '
276 b'repository will be more resilient to storing '
284 b'certain paths and performance of certain '
277 b'certain paths and performance of certain '
285 b'operations should be improved'
278 b'operations should be improved'
286 )
279 )
287
280
288
281
289 @registerformatvariant
282 @registerformatvariant
290 class dotencode(requirementformatvariant):
283 class dotencode(requirementformatvariant):
291 name = b'dotencode'
284 name = b'dotencode'
292
285
293 _requirement = b'dotencode'
286 _requirement = b'dotencode'
294
287
295 default = True
288 default = True
296
289
297 description = _(
290 description = _(
298 b'storage of filenames beginning with a period or '
291 b'storage of filenames beginning with a period or '
299 b'space may not work correctly'
292 b'space may not work correctly'
300 )
293 )
301
294
302 upgrademessage = _(
295 upgrademessage = _(
303 b'repository will be better able to store files '
296 b'repository will be better able to store files '
304 b'beginning with a space or period'
297 b'beginning with a space or period'
305 )
298 )
306
299
307
300
308 @registerformatvariant
301 @registerformatvariant
309 class generaldelta(requirementformatvariant):
302 class generaldelta(requirementformatvariant):
310 name = b'generaldelta'
303 name = b'generaldelta'
311
304
312 _requirement = b'generaldelta'
305 _requirement = b'generaldelta'
313
306
314 default = True
307 default = True
315
308
316 description = _(
309 description = _(
317 b'deltas within internal storage are unable to '
310 b'deltas within internal storage are unable to '
318 b'choose optimal revisions; repository is larger and '
311 b'choose optimal revisions; repository is larger and '
319 b'slower than it could be; interaction with other '
312 b'slower than it could be; interaction with other '
320 b'repositories may require extra network and CPU '
313 b'repositories may require extra network and CPU '
321 b'resources, making "hg push" and "hg pull" slower'
314 b'resources, making "hg push" and "hg pull" slower'
322 )
315 )
323
316
324 upgrademessage = _(
317 upgrademessage = _(
325 b'repository storage will be able to create '
318 b'repository storage will be able to create '
326 b'optimal deltas; new repository data will be '
319 b'optimal deltas; new repository data will be '
327 b'smaller and read times should decrease; '
320 b'smaller and read times should decrease; '
328 b'interacting with other repositories using this '
321 b'interacting with other repositories using this '
329 b'storage model should require less network and '
322 b'storage model should require less network and '
330 b'CPU resources, making "hg push" and "hg pull" '
323 b'CPU resources, making "hg push" and "hg pull" '
331 b'faster'
324 b'faster'
332 )
325 )
333
326
334
327
335 @registerformatvariant
328 @registerformatvariant
336 class sharedsafe(requirementformatvariant):
329 class sharesafe(requirementformatvariant):
337 name = b'exp-sharesafe'
330 name = b'exp-sharesafe'
338 _requirement = requirements.SHARESAFE_REQUIREMENT
331 _requirement = requirements.SHARESAFE_REQUIREMENT
339
332
340 default = False
333 default = False
341
334
342 description = _(
335 description = _(
343 b'old shared repositories do not share source repository '
336 b'old shared repositories do not share source repository '
344 b'requirements and config. This leads to various problems '
337 b'requirements and config. This leads to various problems '
345 b'when the source repository format is upgraded or some new '
338 b'when the source repository format is upgraded or some new '
346 b'extensions are enabled.'
339 b'extensions are enabled.'
347 )
340 )
348
341
349 upgrademessage = _(
342 upgrademessage = _(
350 b'Upgrades a repository to share-safe format so that future '
343 b'Upgrades a repository to share-safe format so that future '
351 b'shares of this repository share its requirements and configs.'
344 b'shares of this repository share its requirements and configs.'
352 )
345 )
353
346
354
347
355 @registerformatvariant
348 @registerformatvariant
356 class sparserevlog(requirementformatvariant):
349 class sparserevlog(requirementformatvariant):
357 name = b'sparserevlog'
350 name = b'sparserevlog'
358
351
359 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
352 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
360
353
361 default = True
354 default = True
362
355
363 description = _(
356 description = _(
364 b'in order to limit disk reading and memory usage on older '
357 b'in order to limit disk reading and memory usage on older '
365 b'versions, the span of a delta chain from its root to its '
358 b'versions, the span of a delta chain from its root to its '
366 b'end is limited, regardless of the relevant data in this '
359 b'end is limited, regardless of the relevant data in this '
367 b'span. This can severely limit the ability of Mercurial '
360 b'span. This can severely limit the ability of Mercurial '
368 b'to build good delta chains, resulting in much more '
361 b'to build good delta chains, resulting in much more '
369 b'storage space being taken and limiting the reusability '
362 b'storage space being taken and limiting the reusability '
370 b'of on-disk deltas during exchange.'
363 b'of on-disk deltas during exchange.'
371 )
364 )
372
365
373 upgrademessage = _(
366 upgrademessage = _(
374 b'Revlog supports delta chains with more unused data '
367 b'Revlog supports delta chains with more unused data '
375 b'between payloads. These gaps will be skipped at read '
368 b'between payloads. These gaps will be skipped at read '
376 b'time. This allows for better delta chains, better '
369 b'time. This allows for better delta chains, better '
377 b'compression, and faster exchange with the server.'
370 b'compression, and faster exchange with the server.'
378 )
371 )
379
372
380
373
381 @registerformatvariant
374 @registerformatvariant
382 class sidedata(requirementformatvariant):
375 class sidedata(requirementformatvariant):
383 name = b'sidedata'
376 name = b'sidedata'
384
377
385 _requirement = requirements.SIDEDATA_REQUIREMENT
378 _requirement = requirements.SIDEDATA_REQUIREMENT
386
379
387 default = False
380 default = False
388
381
389 description = _(
382 description = _(
390 b'Allows storage of extra data alongside a revision, '
383 b'Allows storage of extra data alongside a revision, '
391 b'unlocking various caching options.'
384 b'unlocking various caching options.'
392 )
385 )
393
386
394 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
387 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
395
388
396
389
397 @registerformatvariant
390 @registerformatvariant
398 class persistentnodemap(requirementformatvariant):
391 class persistentnodemap(requirementformatvariant):
399 name = b'persistent-nodemap'
392 name = b'persistent-nodemap'
400
393
401 _requirement = requirements.NODEMAP_REQUIREMENT
394 _requirement = requirements.NODEMAP_REQUIREMENT
402
395
403 default = False
396 default = False
404
397
405 description = _(
398 description = _(
406 b'persist the node -> rev mapping on disk to speed up lookups'
399 b'persist the node -> rev mapping on disk to speed up lookups'
407 )
400 )
408
401
409 upgrademessage = _(b'Speed up revision lookups by node id.')
402 upgrademessage = _(b'Speed up revision lookups by node id.')
410
403
411
404
412 @registerformatvariant
405 @registerformatvariant
413 class copiessdc(requirementformatvariant):
406 class copiessdc(requirementformatvariant):
414 name = b'copies-sdc'
407 name = b'copies-sdc'
415
408
416 _requirement = requirements.COPIESSDC_REQUIREMENT
409 _requirement = requirements.COPIESSDC_REQUIREMENT
417
410
418 default = False
411 default = False
419
412
420 description = _(b'Stores copy information alongside changesets.')
413 description = _(b'Stores copy information alongside changesets.')
421
414
422 upgrademessage = _(
415 upgrademessage = _(
423 b'Allows using a more efficient algorithm for copy tracing.'
416 b'Allows using a more efficient algorithm for copy tracing.'
424 )
417 )
425
418
426
419
427 @registerformatvariant
420 @registerformatvariant
428 class removecldeltachain(formatvariant):
421 class removecldeltachain(formatvariant):
429 name = b'plain-cl-delta'
422 name = b'plain-cl-delta'
430
423
431 default = True
424 default = True
432
425
433 description = _(
426 description = _(
434 b'changelog storage is using deltas instead of '
427 b'changelog storage is using deltas instead of '
435 b'raw entries; changelog reading and any '
428 b'raw entries; changelog reading and any '
436 b'operation relying on changelog data are slower '
429 b'operation relying on changelog data are slower '
437 b'than they could be'
430 b'than they could be'
438 )
431 )
439
432
440 upgrademessage = _(
433 upgrademessage = _(
441 b'changelog storage will be reformatted to '
434 b'changelog storage will be reformatted to '
442 b'store raw entries; changelog reading will be '
435 b'store raw entries; changelog reading will be '
443 b'faster; changelog size may be reduced'
436 b'faster; changelog size may be reduced'
444 )
437 )
445
438
446 @staticmethod
439 @staticmethod
447 def fromrepo(repo):
440 def fromrepo(repo):
448 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
441 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
449 # changelogs with deltas.
442 # changelogs with deltas.
450 cl = repo.changelog
443 cl = repo.changelog
451 chainbase = cl.chainbase
444 chainbase = cl.chainbase
452 return all(rev == chainbase(rev) for rev in cl)
445 return all(rev == chainbase(rev) for rev in cl)
453
446
454 @staticmethod
447 @staticmethod
455 def fromconfig(repo):
448 def fromconfig(repo):
456 return True
449 return True
457
450
458
451
459 @registerformatvariant
452 @registerformatvariant
460 class compressionengine(formatvariant):
453 class compressionengine(formatvariant):
461 name = b'compression'
454 name = b'compression'
462 default = b'zlib'
455 default = b'zlib'
463
456
464 description = _(
457 description = _(
465 b'Compression algorithm used to compress data. '
458 b'Compression algorithm used to compress data. '
466 b'Some engines are faster than others'
459 b'Some engines are faster than others'
467 )
460 )
468
461
469 upgrademessage = _(
462 upgrademessage = _(
470 b'revlog content will be recompressed with the new algorithm.'
463 b'revlog content will be recompressed with the new algorithm.'
471 )
464 )
472
465
473 @classmethod
466 @classmethod
474 def fromrepo(cls, repo):
467 def fromrepo(cls, repo):
475 # we allow multiple compression engine requirements to co-exist because
468 # we allow multiple compression engine requirements to co-exist because
476 # strictly speaking, revlog seems to support mixed compression styles.
469 # strictly speaking, revlog seems to support mixed compression styles.
477 #
470 #
478 # The compression used for new entries will be "the last one"
471 # The compression used for new entries will be "the last one"
479 compression = b'zlib'
472 compression = b'zlib'
480 for req in repo.requirements:
473 for req in repo.requirements:
481 prefix = req.startswith
474 prefix = req.startswith
482 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
475 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
483 compression = req.split(b'-', 2)[2]
476 compression = req.split(b'-', 2)[2]
484 return compression
477 return compression
485
478
486 @classmethod
479 @classmethod
487 def fromconfig(cls, repo):
480 def fromconfig(cls, repo):
488 compengines = repo.ui.configlist(b'format', b'revlog-compression')
481 compengines = repo.ui.configlist(b'format', b'revlog-compression')
489 # return the first valid value as the selection code would do
482 # return the first valid value as the selection code would do
490 for comp in compengines:
483 for comp in compengines:
491 if comp in util.compengines:
484 if comp in util.compengines:
492 return comp
485 return comp
493
486
494 # no valid compression found; let's display them all for clarity
487 # no valid compression found; let's display them all for clarity
495 return b','.join(compengines)
488 return b','.join(compengines)
496
489
497
490
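The `split(b'-', 2)[2]` above works because both requirement spellings carry the engine name as everything after the second dash:

assert b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
assert b'exp-compression-none'.split(b'-', 2)[2] == b'none'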
498 @registerformatvariant
491 @registerformatvariant
499 class compressionlevel(formatvariant):
492 class compressionlevel(formatvariant):
500 name = b'compression-level'
493 name = b'compression-level'
501 default = b'default'
494 default = b'default'
502
495
503 description = _(b'compression level')
496 description = _(b'compression level')
504
497
505 upgrademessage = _(b'revlog content will be recompressed')
498 upgrademessage = _(b'revlog content will be recompressed')
506
499
507 @classmethod
500 @classmethod
508 def fromrepo(cls, repo):
501 def fromrepo(cls, repo):
509 comp = compressionengine.fromrepo(repo)
502 comp = compressionengine.fromrepo(repo)
510 level = None
503 level = None
511 if comp == b'zlib':
504 if comp == b'zlib':
512 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
505 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
513 elif comp == b'zstd':
506 elif comp == b'zstd':
514 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
507 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
515 if level is None:
508 if level is None:
516 return b'default'
509 return b'default'
517 return bytes(level)
510 return bytes(level)
518
511
519 @classmethod
512 @classmethod
520 def fromconfig(cls, repo):
513 def fromconfig(cls, repo):
521 comp = compressionengine.fromconfig(repo)
514 comp = compressionengine.fromconfig(repo)
522 level = None
515 level = None
523 if comp == b'zlib':
516 if comp == b'zlib':
524 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
517 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
525 elif comp == b'zstd':
518 elif comp == b'zstd':
526 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
519 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
527 if level is None:
520 if level is None:
528 return b'default'
521 return b'default'
529 return bytes(level)
522 return bytes(level)
530
523
531
524
532 def finddeficiencies(repo):
525 def finddeficiencies(repo):
533 """returns a list of deficiencies that the repo suffers from"""
526 """returns a list of deficiencies that the repo suffers from"""
534 deficiencies = []
527 deficiencies = []
535
528
536 # We could detect lack of revlogv1 and store here, but they were added
529 # We could detect lack of revlogv1 and store here, but they were added
537 # in 0.9.2 and we don't support upgrading repos without these
530 # in 0.9.2 and we don't support upgrading repos without these
538 # requirements, so let's not bother.
531 # requirements, so let's not bother.
539
532
540 for fv in allformatvariant:
533 for fv in allformatvariant:
541 if not fv.fromrepo(repo):
534 if not fv.fromrepo(repo):
542 deficiencies.append(fv)
535 deficiencies.append(fv)
543
536
544 return deficiencies
537 return deficiencies
545
538
546
539
547 # search without '-' to support the older form on newer clients.
548 #
549 # We don't enforce backward compatibility for debug commands so this
550 # might eventually be dropped. However, having to use two different
551 # forms in scripts when comparing results is annoying enough to add
552 # backward compatibility for a while.
553 legacy_opts_map = {
554 b'redeltaparent': b're-delta-parent',
555 b'redeltamultibase': b're-delta-multibase',
556 b'redeltaall': b're-delta-all',
557 b'redeltafulladd': b're-delta-fulladd',
558 }
559
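upgraderepo() normalizes user input through this map before matching optimization names, so both spellings keep working:

optimize = [b'redeltaparent', b're-delta-all']
optimize = {legacy_opts_map.get(o, o) for o in optimize}
assert optimize == {b're-delta-parent', b're-delta-all'}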
560 ALL_OPTIMISATIONS = []
540 ALL_OPTIMISATIONS = []
561
541
562
542
563 def register_optimization(obj):
543 def register_optimization(obj):
564 ALL_OPTIMISATIONS.append(obj)
544 ALL_OPTIMISATIONS.append(obj)
565 return obj
545 return obj
566
546
567
547
568 register_optimization(
548 register_optimization(
569 improvement(
549 improvement(
570 name=b're-delta-parent',
550 name=b're-delta-parent',
571 type=OPTIMISATION,
551 type=OPTIMISATION,
572 description=_(
552 description=_(
573 b'deltas within internal storage will be recalculated to '
553 b'deltas within internal storage will be recalculated to '
574 b'choose an optimal base revision where this was not '
554 b'choose an optimal base revision where this was not '
575 b'already done; the size of the repository may shrink and '
555 b'already done; the size of the repository may shrink and '
576 b'various operations may become faster; the first time '
556 b'various operations may become faster; the first time '
577 b'this optimization is performed could slow down upgrade '
557 b'this optimization is performed could slow down upgrade '
578 b'execution considerably; subsequent invocations should '
558 b'execution considerably; subsequent invocations should '
579 b'not run noticeably slower'
559 b'not run noticeably slower'
580 ),
560 ),
581 upgrademessage=_(
561 upgrademessage=_(
582 b'deltas within internal storage will choose a new '
562 b'deltas within internal storage will choose a new '
583 b'base revision if needed'
563 b'base revision if needed'
584 ),
564 ),
585 )
565 )
586 )
566 )
587
567
588 register_optimization(
568 register_optimization(
589 improvement(
569 improvement(
590 name=b're-delta-multibase',
570 name=b're-delta-multibase',
591 type=OPTIMISATION,
571 type=OPTIMISATION,
592 description=_(
572 description=_(
593 b'deltas within internal storage will be recalculated '
573 b'deltas within internal storage will be recalculated '
594 b'against multiple base revision and the smallest '
574 b'against multiple base revision and the smallest '
595 b'difference will be used; the size of the repository may '
575 b'difference will be used; the size of the repository may '
596 b'shrink significantly when there are many merges; this '
576 b'shrink significantly when there are many merges; this '
597 b'optimization will slow down execution in proportion to '
577 b'optimization will slow down execution in proportion to '
598 b'the number of merges in the repository and the amount '
578 b'the number of merges in the repository and the amount '
599 b'of files in the repository; this slow down should not '
579 b'of files in the repository; this slow down should not '
600 b'be significant unless there are tens of thousands of '
580 b'be significant unless there are tens of thousands of '
601 b'files and thousands of merges'
581 b'files and thousands of merges'
602 ),
582 ),
603 upgrademessage=_(
583 upgrademessage=_(
604 b'deltas within internal storage will choose an '
584 b'deltas within internal storage will choose an '
605 b'optimal delta by computing deltas against multiple '
585 b'optimal delta by computing deltas against multiple '
606 b'parents; may slow down execution time '
586 b'parents; may slow down execution time '
607 b'significantly'
587 b'significantly'
608 ),
588 ),
609 )
589 )
610 )
590 )
611
591
612 register_optimization(
592 register_optimization(
613 improvement(
593 improvement(
614 name=b're-delta-all',
594 name=b're-delta-all',
615 type=OPTIMISATION,
595 type=OPTIMISATION,
616 description=_(
596 description=_(
617 b'deltas within internal storage will always be '
597 b'deltas within internal storage will always be '
618 b'recalculated without reusing prior deltas; this will '
598 b'recalculated without reusing prior deltas; this will '
619 b'likely make execution run several times slower; this '
599 b'likely make execution run several times slower; this '
620 b'optimization is typically not needed'
600 b'optimization is typically not needed'
621 ),
601 ),
622 upgrademessage=_(
602 upgrademessage=_(
623 b'deltas within internal storage will be fully '
603 b'deltas within internal storage will be fully '
624 b'recomputed; this will likely drastically slow down '
604 b'recomputed; this will likely drastically slow down '
625 b'execution time'
605 b'execution time'
626 ),
606 ),
627 )
607 )
628 )
608 )
629
609
630 register_optimization(
610 register_optimization(
631 improvement(
611 improvement(
632 name=b're-delta-fulladd',
612 name=b're-delta-fulladd',
633 type=OPTIMISATION,
613 type=OPTIMISATION,
634 description=_(
614 description=_(
635 b'every revision will be re-added as if it was new '
615 b'every revision will be re-added as if it was new '
636 b'content. It will go through the full storage '
616 b'content. It will go through the full storage '
637 b'mechanism giving extensions a chance to process it '
617 b'mechanism giving extensions a chance to process it '
638 b'(eg. lfs). This is similar to "re-delta-all" but even '
618 b'(eg. lfs). This is similar to "re-delta-all" but even '
639 b'slower since more logic is involved.'
619 b'slower since more logic is involved.'
640 ),
620 ),
641 upgrademessage=_(
621 upgrademessage=_(
642 b'each revision will be added as new content to the '
622 b'each revision will be added as new content to the '
643 b'internal storage; this will likely drastically slow '
623 b'internal storage; this will likely drastically slow '
644 b'down execution time, but some extensions might need '
624 b'down execution time, but some extensions might need '
645 b'it'
625 b'it'
646 ),
626 ),
647 )
627 )
648 )
628 )
649
629
650
630
651 def findoptimizations(repo):
631 def findoptimizations(repo):
652 """Determine optimisation that could be used during upgrade"""
632 """Determine optimisation that could be used during upgrade"""
653 # These are unconditionally added. There is logic later that figures out
633 # These are unconditionally added. There is logic later that figures out
654 # which ones to apply.
634 # which ones to apply.
655 return list(ALL_OPTIMISATIONS)
635 return list(ALL_OPTIMISATIONS)
656
636
657
637
658 def determineactions(repo, deficiencies, sourcereqs, destreqs):
638 def determineactions(repo, deficiencies, sourcereqs, destreqs):
659 """Determine upgrade actions that will be performed.
639 """Determine upgrade actions that will be performed.
660
640
661 Given a list of improvements as returned by ``finddeficiencies`` and
641 Given a list of improvements as returned by ``finddeficiencies`` and
662 ``findoptimizations``, determine the list of upgrade actions that
642 ``findoptimizations``, determine the list of upgrade actions that
663 will be performed.
643 will be performed.
664
644
665 The role of this function is to filter improvements if needed, apply
645 The role of this function is to filter improvements if needed, apply
666 recommended optimizations from the improvements list that make sense,
646 recommended optimizations from the improvements list that make sense,
667 etc.
647 etc.
668
648
669 Returns a list of action objects.
649 Returns a list of action objects.
670 """
650 """
671 newactions = []
651 newactions = []
672
652
673 for d in deficiencies:
653 for d in deficiencies:
674 name = d._requirement
654 name = d._requirement
675
655
676 # If the action is a requirement that doesn't show up in the
656 # If the action is a requirement that doesn't show up in the
677 # destination requirements, prune the action.
657 # destination requirements, prune the action.
678 if name is not None and name not in destreqs:
658 if name is not None and name not in destreqs:
679 continue
659 continue
680
660
681 newactions.append(d)
661 newactions.append(d)
682
662
683 # FUTURE consider adding some optimizations here for certain transitions.
663 # FUTURE consider adding some optimizations here for certain transitions.
684 # e.g. adding generaldelta could schedule parent redeltas.
664 # e.g. adding generaldelta could schedule parent redeltas.
685
665
686 return newactions
666 return newactions
687
688
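The pruning rule above keeps a deficiency only when its backing requirement survives into the destination (or when it is not requirement-based at all). A tiny standalone sketch with made-up requirement names:

class sketchdeficiency(object):
    def __init__(self, requirement):
        self._requirement = requirement

deficiencies = [
    sketchdeficiency(b'generaldelta'),
    sketchdeficiency(b'exp-dropped'),
]
destreqs = {b'revlogv1', b'store', b'generaldelta'}
kept = [
    d
    for d in deficiencies
    if d._requirement is None or d._requirement in destreqs
]
assert [d._requirement for d in kept] == [b'generaldelta']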
689 def upgraderepo(
690 ui,
691 repo,
692 run=False,
693 optimize=None,
694 backup=True,
695 manifest=None,
696 changelog=None,
697 filelogs=None,
698 ):
699 """Upgrade a repository in place."""
700 if optimize is None:
701 optimize = []
702 optimize = {legacy_opts_map.get(o, o) for o in optimize}
703 repo = repo.unfiltered()
704
705 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
706 specentries = (
707 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
708 (upgrade_engine.UPGRADE_MANIFEST, manifest),
709 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
710 )
711 specified = [(y, x) for (y, x) in specentries if x is not None]
712 if specified:
713 # there are some limitations on which revlogs can be recloned
714 if any(x for y, x in specified):
715 revlogs = set()
716 for upgrade, enabled in specified:
717 if enabled:
718 revlogs.add(upgrade)
719 else:
720 # none are enabled
721 for upgrade, __ in specified:
722 revlogs.discard(upgrade)
723
724 # Ensure the repository can be upgraded.
725 missingreqs = requiredsourcerequirements(repo) - repo.requirements
726 if missingreqs:
727 raise error.Abort(
728 _(b'cannot upgrade repository; requirement missing: %s')
729 % _(b', ').join(sorted(missingreqs))
730 )

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; unsupported source '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(blockedreqs))
        )

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui)
    )
    newreqs.update(preservedrequirements(repo))

    noremovereqs = (
        repo.requirements - newreqs - supportremovedrequirements(repo)
    )
    if noremovereqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; requirement would be '
                b'removed: %s'
            )
            % _(b', ').join(sorted(noremovereqs))
        )

    noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
    if noaddreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support adding '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(noaddreqs))
        )

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support '
                b'destination requirement: %s'
            )
            % _(b', ').join(sorted(unsupportedreqs))
        )
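    # Summary of the gauntlet above (explanatory note, not original code): the
    # source carries every required requirement and none of the blocking ones,
    # the upgrade never drops a requirement it cannot handle, never adds one it
    # cannot create, and every destination requirement is supported.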

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize:  # anything left is unknown
        raise error.Abort(
            _(b'unknown optimization action requested: %s')
            % b', '.join(sorted(optimize)),
            hint=_(b'run without arguments to see valid optimizations'),
        )
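    # Illustrative failure (command line assumed):
    #   hg debugupgraderepo --optimize no-such-name
    # aborts with "unknown optimization action requested: no-such-name".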

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(
        o
        for o in sorted(optimizations)
        # determineactions could already have added this optimisation
        if o not in actions
    )

    removedreqs = repo.requirements - newreqs
    addedreqs = newreqs - repo.requirements

    if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
        incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
        if incompatible:
            msg = _(
                b'ignoring revlogs selection flags, format requirements '
                b'change: %s\n'
            )
            ui.warn(msg % b', '.join(sorted(incompatible)))
            revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS

    def write_labeled(l, label):
        first = True
        for r in sorted(l):
            if not first:
                ui.write(b', ')
            ui.write(r, label=label)
            first = False
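    # Illustrative call (values assumed):
    #   write_labeled([b'fncache', b'dotencode'], "some.label")
    # writes "dotencode, fncache": sorted, comma-separated, and labeled for
    # color output.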

    def printrequirements():
        ui.write(_(b'requirements\n'))
        ui.write(_(b' preserved: '))
        write_labeled(
            newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
        )
        ui.write((b'\n'))
        removed = repo.requirements - newreqs
        if removed:
            ui.write(_(b' removed: '))
            write_labeled(removed, "upgrade-repo.requirement.removed")
            ui.write((b'\n'))
        added = newreqs - repo.requirements
        if added:
            ui.write(_(b' added: '))
            write_labeled(added, "upgrade-repo.requirement.added")
            ui.write((b'\n'))
        ui.write(b'\n')

    def printoptimisations():
        optimisations = [a for a in actions if a.type == OPTIMISATION]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            ui.write(_(b'optimisations: '))
            write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            ui.write(b'\n\n')

    def printupgradeactions():
        for a in actions:
            ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))

    def print_affected_revlogs():
        if not revlogs:
            ui.write((b'no revlogs to process\n'))
        else:
            ui.write((b'processed revlogs:\n'))
            for r in sorted(revlogs):
                ui.write((b' - %s\n' % r))
        ui.write((b'\n'))
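    # Sample report fragment from the helpers above (output assumed):
    #
    #   requirements
    #    preserved: dotencode, fncache, generaldelta, revlogv1, store
    #
    #   processed revlogs:
    #    - all-filelogs
    #    - changelog
    #    - manifest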

    if not run:
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)
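        # The split above (explanatory note): `fromconfig` holds deficiencies
        # the user's own configuration asks for, while `onlydefault` holds
        # those that only Mercurial's default configuration would enable.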

        if fromconfig or onlydefault:

            if fromconfig:
                ui.status(
                    _(
                        b'repository lacks features recommended by '
                        b'current config options:\n\n'
                    )
                )
                for i in fromconfig:
                    ui.status(b'%s\n %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.status(
                    _(
                        b'repository lacks features used by the default '
                        b'config options:\n\n'
                    )
                )
                for i in onlydefault:
                    ui.status(b'%s\n %s\n\n' % (i.name, i.description))

            ui.status(b'\n')
        else:
            ui.status(
                _(
                    b'(no feature deficiencies found in existing '
                    b'repository)\n'
                )
            )

        ui.status(
            _(
                b'performing an upgrade with "--run" will make the following '
                b'changes:\n\n'
            )
        )

        printrequirements()
        printoptimisations()
        printupgradeactions()
        print_affected_revlogs()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.status(
                _(
                    b'additional optimizations are available by specifying '
                    b'"--optimize <name>":\n\n'
                )
            )
            for i in unusedoptimize:
                ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=True case.
    ui.write(_(b'upgrade will perform the following actions:\n\n'))
    printrequirements()
    printoptimisations()
    printupgradeactions()
    print_affected_revlogs()

    upgradeactions = [a.name for a in actions]

    ui.status(_(b'beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.status(_(b'repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
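        # Note (added for clarity): the staging directory is created inside
        # `.hg` itself, so the final swap of store files stays on the same
        # filesystem and can rely on cheap renames.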
        tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.status(
                _(
                    b'creating temporary repository to stage migrated '
                    b'data: %s\n'
                )
                % tmppath
            )

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = upgrade_engine.upgrade(
                    ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
                )
            if not (backup or backuppath is None):
                ui.status(
                    _(b'removing old repository content%s\n') % backuppath
                )
                repo.vfs.rmtree(backuppath, forcibly=True)
                backuppath = None
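            # Note (added for clarity): with backup=False, i.e. the command's
            # --no-backup flag, the pre-upgrade store is deleted as soon as
            # the migration succeeds instead of being kept for verification.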

        finally:
            ui.status(_(b'removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

        if backuppath and not ui.quiet:
            ui.warn(
                _(b'copy of old repository backed up at %s\n') % backuppath
            )
            ui.warn(
                _(
                    b'the old repository will not be deleted; remove '
                    b'it to free up disk space once the upgraded '
                    b'repository is verified\n'
                )
            )

        if sharedsafe.name in addedreqs:
            ui.warn(
                _(
                    b'repository upgraded to share safe mode, existing'
                    b' shares will still work in old non-safe mode. '
                    b'Re-share existing shares to use them in safe mode.'
                    b' New shares will be created in safe mode.\n'
                )
            )
        if sharedsafe.name in removedreqs:
            ui.warn(
                _(
                    b'repository downgraded to not use share safe mode, '
                    b'existing shares will not work and need to'
                    b' be reshared.\n'
                )
            )
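        # Hedged pointer (not in the original code): re-sharing typically
        # means removing the stale share and recreating it with `hg share`
        # from the upgraded repository.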