upgrade: properly filter action depending on planned work...
marmoute -
r45248:c36a3fcf stable
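The hunk below touches ``determineactions()``: instead of pre-computing ``knownreqs = supporteddestrequirements(repo)`` and pruning a deficiency when its ``name`` is a known requirement missing from the destination, each deficiency is now filtered by its own ``_requirement`` attribute, so only actions whose backing requirement is actually planned for the destination are kept. A minimal sketch of the new filtering logic, paraphrased from the hunk in this diff (docstring and unrelated comments omitted):

    def determineactions(repo, deficiencies, sourcereqs, destreqs):
        newactions = []
        for d in deficiencies:
            # filter on the requirement backing this deficiency rather than
            # on its display name
            name = d._requirement
            # if that requirement will not be present in the destination,
            # no work is planned for it: prune the action
            if name is not None and name not in destreqs:
                continue
            newactions.append(d)
        return newactions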
@@ -1,1400 +1,1398
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from .pycompat import getattr
13 from .pycompat import getattr
14 from . import (
14 from . import (
15 changelog,
15 changelog,
16 copies,
16 copies,
17 error,
17 error,
18 filelog,
18 filelog,
19 hg,
19 hg,
20 localrepo,
20 localrepo,
21 manifest,
21 manifest,
22 pycompat,
22 pycompat,
23 revlog,
23 revlog,
24 scmutil,
24 scmutil,
25 util,
25 util,
26 vfs as vfsmod,
26 vfs as vfsmod,
27 )
27 )
28
28
29 from .utils import compression
29 from .utils import compression
30
30
31 # list of requirements that request a clone of all revlog if added/removed
31 # list of requirements that request a clone of all revlog if added/removed
32 RECLONES_REQUIREMENTS = {
32 RECLONES_REQUIREMENTS = {
33 b'generaldelta',
33 b'generaldelta',
34 localrepo.SPARSEREVLOG_REQUIREMENT,
34 localrepo.SPARSEREVLOG_REQUIREMENT,
35 }
35 }
36
36
37
37
38 def requiredsourcerequirements(repo):
38 def requiredsourcerequirements(repo):
39 """Obtain requirements required to be present to upgrade a repo.
39 """Obtain requirements required to be present to upgrade a repo.
40
40
41 An upgrade will not be allowed if the repository doesn't have the
41 An upgrade will not be allowed if the repository doesn't have the
42 requirements returned by this function.
42 requirements returned by this function.
43 """
43 """
44 return {
44 return {
45 # Introduced in Mercurial 0.9.2.
45 # Introduced in Mercurial 0.9.2.
46 b'revlogv1',
46 b'revlogv1',
47 # Introduced in Mercurial 0.9.2.
47 # Introduced in Mercurial 0.9.2.
48 b'store',
48 b'store',
49 }
49 }
50
50
51
51
52 def blocksourcerequirements(repo):
52 def blocksourcerequirements(repo):
53 """Obtain requirements that will prevent an upgrade from occurring.
53 """Obtain requirements that will prevent an upgrade from occurring.
54
54
55 An upgrade cannot be performed if the source repository contains a
55 An upgrade cannot be performed if the source repository contains a
56 requirements in the returned set.
56 requirements in the returned set.
57 """
57 """
58 return {
58 return {
59 # The upgrade code does not yet support these experimental features.
59 # The upgrade code does not yet support these experimental features.
60 # This is an artificial limitation.
60 # This is an artificial limitation.
61 b'treemanifest',
61 b'treemanifest',
62 # This was a precursor to generaldelta and was never enabled by default.
62 # This was a precursor to generaldelta and was never enabled by default.
63 # It should (hopefully) not exist in the wild.
63 # It should (hopefully) not exist in the wild.
64 b'parentdelta',
64 b'parentdelta',
65 # Upgrade should operate on the actual store, not the shared link.
65 # Upgrade should operate on the actual store, not the shared link.
66 b'shared',
66 b'shared',
67 }
67 }
68
68
69
69
70 def supportremovedrequirements(repo):
70 def supportremovedrequirements(repo):
71 """Obtain requirements that can be removed during an upgrade.
71 """Obtain requirements that can be removed during an upgrade.
72
72
73 If an upgrade were to create a repository that dropped a requirement,
73 If an upgrade were to create a repository that dropped a requirement,
74 the dropped requirement must appear in the returned set for the upgrade
74 the dropped requirement must appear in the returned set for the upgrade
75 to be allowed.
75 to be allowed.
76 """
76 """
77 supported = {
77 supported = {
78 localrepo.SPARSEREVLOG_REQUIREMENT,
78 localrepo.SPARSEREVLOG_REQUIREMENT,
79 localrepo.SIDEDATA_REQUIREMENT,
79 localrepo.SIDEDATA_REQUIREMENT,
80 localrepo.COPIESSDC_REQUIREMENT,
80 localrepo.COPIESSDC_REQUIREMENT,
81 }
81 }
82 for name in compression.compengines:
82 for name in compression.compengines:
83 engine = compression.compengines[name]
83 engine = compression.compengines[name]
84 if engine.available() and engine.revlogheader():
84 if engine.available() and engine.revlogheader():
85 supported.add(b'exp-compression-%s' % name)
85 supported.add(b'exp-compression-%s' % name)
86 if engine.name() == b'zstd':
86 if engine.name() == b'zstd':
87 supported.add(b'revlog-compression-zstd')
87 supported.add(b'revlog-compression-zstd')
88 return supported
88 return supported
89
89
90
90
91 def supporteddestrequirements(repo):
91 def supporteddestrequirements(repo):
92 """Obtain requirements that upgrade supports in the destination.
92 """Obtain requirements that upgrade supports in the destination.
93
93
94 If the result of the upgrade would create requirements not in this set,
94 If the result of the upgrade would create requirements not in this set,
95 the upgrade is disallowed.
95 the upgrade is disallowed.
96
96
97 Extensions should monkeypatch this to add their custom requirements.
97 Extensions should monkeypatch this to add their custom requirements.
98 """
98 """
99 supported = {
99 supported = {
100 b'dotencode',
100 b'dotencode',
101 b'fncache',
101 b'fncache',
102 b'generaldelta',
102 b'generaldelta',
103 b'revlogv1',
103 b'revlogv1',
104 b'store',
104 b'store',
105 localrepo.SPARSEREVLOG_REQUIREMENT,
105 localrepo.SPARSEREVLOG_REQUIREMENT,
106 localrepo.SIDEDATA_REQUIREMENT,
106 localrepo.SIDEDATA_REQUIREMENT,
107 localrepo.COPIESSDC_REQUIREMENT,
107 localrepo.COPIESSDC_REQUIREMENT,
108 }
108 }
109 for name in compression.compengines:
109 for name in compression.compengines:
110 engine = compression.compengines[name]
110 engine = compression.compengines[name]
111 if engine.available() and engine.revlogheader():
111 if engine.available() and engine.revlogheader():
112 supported.add(b'exp-compression-%s' % name)
112 supported.add(b'exp-compression-%s' % name)
113 if engine.name() == b'zstd':
113 if engine.name() == b'zstd':
114 supported.add(b'revlog-compression-zstd')
114 supported.add(b'revlog-compression-zstd')
115 return supported
115 return supported
116
116
117
117
118 def allowednewrequirements(repo):
118 def allowednewrequirements(repo):
119 """Obtain requirements that can be added to a repository during upgrade.
119 """Obtain requirements that can be added to a repository during upgrade.
120
120
121 This is used to disallow proposed requirements from being added when
121 This is used to disallow proposed requirements from being added when
122 they weren't present before.
122 they weren't present before.
123
123
124 We use a list of allowed requirement additions instead of a list of known
124 We use a list of allowed requirement additions instead of a list of known
125 bad additions because the whitelist approach is safer and will prevent
125 bad additions because the whitelist approach is safer and will prevent
126 future, unknown requirements from accidentally being added.
126 future, unknown requirements from accidentally being added.
127 """
127 """
128 supported = {
128 supported = {
129 b'dotencode',
129 b'dotencode',
130 b'fncache',
130 b'fncache',
131 b'generaldelta',
131 b'generaldelta',
132 localrepo.SPARSEREVLOG_REQUIREMENT,
132 localrepo.SPARSEREVLOG_REQUIREMENT,
133 localrepo.SIDEDATA_REQUIREMENT,
133 localrepo.SIDEDATA_REQUIREMENT,
134 localrepo.COPIESSDC_REQUIREMENT,
134 localrepo.COPIESSDC_REQUIREMENT,
135 }
135 }
136 for name in compression.compengines:
136 for name in compression.compengines:
137 engine = compression.compengines[name]
137 engine = compression.compengines[name]
138 if engine.available() and engine.revlogheader():
138 if engine.available() and engine.revlogheader():
139 supported.add(b'exp-compression-%s' % name)
139 supported.add(b'exp-compression-%s' % name)
140 if engine.name() == b'zstd':
140 if engine.name() == b'zstd':
141 supported.add(b'revlog-compression-zstd')
141 supported.add(b'revlog-compression-zstd')
142 return supported
142 return supported
143
143
144
144
145 def preservedrequirements(repo):
145 def preservedrequirements(repo):
146 return set()
146 return set()
147
147
148
148
149 deficiency = b'deficiency'
149 deficiency = b'deficiency'
150 optimisation = b'optimization'
150 optimisation = b'optimization'
151
151
152
152
153 class improvement(object):
153 class improvement(object):
154 """Represents an improvement that can be made as part of an upgrade.
154 """Represents an improvement that can be made as part of an upgrade.
155
155
156 The following attributes are defined on each instance:
156 The following attributes are defined on each instance:
157
157
158 name
158 name
159 Machine-readable string uniquely identifying this improvement. It
159 Machine-readable string uniquely identifying this improvement. It
160 will be mapped to an action later in the upgrade process.
160 will be mapped to an action later in the upgrade process.
161
161
162 type
162 type
163 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
163 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
164 problem. An optimization is an action (sometimes optional) that
164 problem. An optimization is an action (sometimes optional) that
165 can be taken to further improve the state of the repository.
165 can be taken to further improve the state of the repository.
166
166
167 description
167 description
168 Message intended for humans explaining the improvement in more detail,
168 Message intended for humans explaining the improvement in more detail,
169 including the implications of it. For ``deficiency`` types, should be
169 including the implications of it. For ``deficiency`` types, should be
170 worded in the present tense. For ``optimisation`` types, should be
170 worded in the present tense. For ``optimisation`` types, should be
171 worded in the future tense.
171 worded in the future tense.
172
172
173 upgrademessage
173 upgrademessage
174 Message intended for humans explaining what an upgrade addressing this
174 Message intended for humans explaining what an upgrade addressing this
175 issue will do. Should be worded in the future tense.
175 issue will do. Should be worded in the future tense.
176 """
176 """
177
177
178 def __init__(self, name, type, description, upgrademessage):
178 def __init__(self, name, type, description, upgrademessage):
179 self.name = name
179 self.name = name
180 self.type = type
180 self.type = type
181 self.description = description
181 self.description = description
182 self.upgrademessage = upgrademessage
182 self.upgrademessage = upgrademessage
183
183
184 def __eq__(self, other):
184 def __eq__(self, other):
185 if not isinstance(other, improvement):
185 if not isinstance(other, improvement):
186 # This is what python tell use to do
186 # This is what python tell use to do
187 return NotImplemented
187 return NotImplemented
188 return self.name == other.name
188 return self.name == other.name
189
189
190 def __ne__(self, other):
190 def __ne__(self, other):
191 return not (self == other)
191 return not (self == other)
192
192
193 def __hash__(self):
193 def __hash__(self):
194 return hash(self.name)
194 return hash(self.name)
195
195
196
196
197 allformatvariant = []
197 allformatvariant = []
198
198
199
199
200 def registerformatvariant(cls):
200 def registerformatvariant(cls):
201 allformatvariant.append(cls)
201 allformatvariant.append(cls)
202 return cls
202 return cls
203
203
204
204
205 class formatvariant(improvement):
205 class formatvariant(improvement):
206 """an improvement subclass dedicated to repository format"""
206 """an improvement subclass dedicated to repository format"""
207
207
208 type = deficiency
208 type = deficiency
209 ### The following attributes should be defined for each class:
209 ### The following attributes should be defined for each class:
210
210
211 # machine-readable string uniquely identifying this improvement. it will be
211 # machine-readable string uniquely identifying this improvement. it will be
212 # mapped to an action later in the upgrade process.
212 # mapped to an action later in the upgrade process.
213 name = None
213 name = None
214
214
215 # message intended for humans explaining the improvement in more detail,
215 # message intended for humans explaining the improvement in more detail,
216 # including the implications of it ``deficiency`` types, should be worded
216 # including the implications of it ``deficiency`` types, should be worded
217 # in the present tense.
217 # in the present tense.
218 description = None
218 description = None
219
219
220 # message intended for humans explaining what an upgrade addressing this
220 # message intended for humans explaining what an upgrade addressing this
221 # issue will do. should be worded in the future tense.
221 # issue will do. should be worded in the future tense.
222 upgrademessage = None
222 upgrademessage = None
223
223
224 # value of current Mercurial default for new repository
224 # value of current Mercurial default for new repository
225 default = None
225 default = None
226
226
227 def __init__(self):
227 def __init__(self):
228 raise NotImplementedError()
228 raise NotImplementedError()
229
229
230 @staticmethod
230 @staticmethod
231 def fromrepo(repo):
231 def fromrepo(repo):
232 """current value of the variant in the repository"""
232 """current value of the variant in the repository"""
233 raise NotImplementedError()
233 raise NotImplementedError()
234
234
235 @staticmethod
235 @staticmethod
236 def fromconfig(repo):
236 def fromconfig(repo):
237 """current value of the variant in the configuration"""
237 """current value of the variant in the configuration"""
238 raise NotImplementedError()
238 raise NotImplementedError()
239
239
240
240
241 class requirementformatvariant(formatvariant):
241 class requirementformatvariant(formatvariant):
242 """formatvariant based on a 'requirement' name.
242 """formatvariant based on a 'requirement' name.
243
243
244 Many format variant are controlled by a 'requirement'. We define a small
244 Many format variant are controlled by a 'requirement'. We define a small
245 subclass to factor the code.
245 subclass to factor the code.
246 """
246 """
247
247
248 # the requirement that control this format variant
248 # the requirement that control this format variant
249 _requirement = None
249 _requirement = None
250
250
251 @staticmethod
251 @staticmethod
252 def _newreporequirements(ui):
252 def _newreporequirements(ui):
253 return localrepo.newreporequirements(
253 return localrepo.newreporequirements(
254 ui, localrepo.defaultcreateopts(ui)
254 ui, localrepo.defaultcreateopts(ui)
255 )
255 )
256
256
257 @classmethod
257 @classmethod
258 def fromrepo(cls, repo):
258 def fromrepo(cls, repo):
259 assert cls._requirement is not None
259 assert cls._requirement is not None
260 return cls._requirement in repo.requirements
260 return cls._requirement in repo.requirements
261
261
262 @classmethod
262 @classmethod
263 def fromconfig(cls, repo):
263 def fromconfig(cls, repo):
264 assert cls._requirement is not None
264 assert cls._requirement is not None
265 return cls._requirement in cls._newreporequirements(repo.ui)
265 return cls._requirement in cls._newreporequirements(repo.ui)
266
266
267
267
268 @registerformatvariant
268 @registerformatvariant
269 class fncache(requirementformatvariant):
269 class fncache(requirementformatvariant):
270 name = b'fncache'
270 name = b'fncache'
271
271
272 _requirement = b'fncache'
272 _requirement = b'fncache'
273
273
274 default = True
274 default = True
275
275
276 description = _(
276 description = _(
277 b'long and reserved filenames may not work correctly; '
277 b'long and reserved filenames may not work correctly; '
278 b'repository performance is sub-optimal'
278 b'repository performance is sub-optimal'
279 )
279 )
280
280
281 upgrademessage = _(
281 upgrademessage = _(
282 b'repository will be more resilient to storing '
282 b'repository will be more resilient to storing '
283 b'certain paths and performance of certain '
283 b'certain paths and performance of certain '
284 b'operations should be improved'
284 b'operations should be improved'
285 )
285 )
286
286
287
287
288 @registerformatvariant
288 @registerformatvariant
289 class dotencode(requirementformatvariant):
289 class dotencode(requirementformatvariant):
290 name = b'dotencode'
290 name = b'dotencode'
291
291
292 _requirement = b'dotencode'
292 _requirement = b'dotencode'
293
293
294 default = True
294 default = True
295
295
296 description = _(
296 description = _(
297 b'storage of filenames beginning with a period or '
297 b'storage of filenames beginning with a period or '
298 b'space may not work correctly'
298 b'space may not work correctly'
299 )
299 )
300
300
301 upgrademessage = _(
301 upgrademessage = _(
302 b'repository will be better able to store files '
302 b'repository will be better able to store files '
303 b'beginning with a space or period'
303 b'beginning with a space or period'
304 )
304 )
305
305
306
306
307 @registerformatvariant
307 @registerformatvariant
308 class generaldelta(requirementformatvariant):
308 class generaldelta(requirementformatvariant):
309 name = b'generaldelta'
309 name = b'generaldelta'
310
310
311 _requirement = b'generaldelta'
311 _requirement = b'generaldelta'
312
312
313 default = True
313 default = True
314
314
315 description = _(
315 description = _(
316 b'deltas within internal storage are unable to '
316 b'deltas within internal storage are unable to '
317 b'choose optimal revisions; repository is larger and '
317 b'choose optimal revisions; repository is larger and '
318 b'slower than it could be; interaction with other '
318 b'slower than it could be; interaction with other '
319 b'repositories may require extra network and CPU '
319 b'repositories may require extra network and CPU '
320 b'resources, making "hg push" and "hg pull" slower'
320 b'resources, making "hg push" and "hg pull" slower'
321 )
321 )
322
322
323 upgrademessage = _(
323 upgrademessage = _(
324 b'repository storage will be able to create '
324 b'repository storage will be able to create '
325 b'optimal deltas; new repository data will be '
325 b'optimal deltas; new repository data will be '
326 b'smaller and read times should decrease; '
326 b'smaller and read times should decrease; '
327 b'interacting with other repositories using this '
327 b'interacting with other repositories using this '
328 b'storage model should require less network and '
328 b'storage model should require less network and '
329 b'CPU resources, making "hg push" and "hg pull" '
329 b'CPU resources, making "hg push" and "hg pull" '
330 b'faster'
330 b'faster'
331 )
331 )
332
332
333
333
334 @registerformatvariant
334 @registerformatvariant
335 class sparserevlog(requirementformatvariant):
335 class sparserevlog(requirementformatvariant):
336 name = b'sparserevlog'
336 name = b'sparserevlog'
337
337
338 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
338 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
339
339
340 default = True
340 default = True
341
341
342 description = _(
342 description = _(
343 b'in order to limit disk reading and memory usage on older '
343 b'in order to limit disk reading and memory usage on older '
344 b'version, the span of a delta chain from its root to its '
344 b'version, the span of a delta chain from its root to its '
345 b'end is limited, whatever the relevant data in this span. '
345 b'end is limited, whatever the relevant data in this span. '
346 b'This can severly limit Mercurial ability to build good '
346 b'This can severly limit Mercurial ability to build good '
347 b'chain of delta resulting is much more storage space being '
347 b'chain of delta resulting is much more storage space being '
348 b'taken and limit reusability of on disk delta during '
348 b'taken and limit reusability of on disk delta during '
349 b'exchange.'
349 b'exchange.'
350 )
350 )
351
351
352 upgrademessage = _(
352 upgrademessage = _(
353 b'Revlog supports delta chain with more unused data '
353 b'Revlog supports delta chain with more unused data '
354 b'between payload. These gaps will be skipped at read '
354 b'between payload. These gaps will be skipped at read '
355 b'time. This allows for better delta chains, making a '
355 b'time. This allows for better delta chains, making a '
356 b'better compression and faster exchange with server.'
356 b'better compression and faster exchange with server.'
357 )
357 )
358
358
359
359
360 @registerformatvariant
360 @registerformatvariant
361 class sidedata(requirementformatvariant):
361 class sidedata(requirementformatvariant):
362 name = b'sidedata'
362 name = b'sidedata'
363
363
364 _requirement = localrepo.SIDEDATA_REQUIREMENT
364 _requirement = localrepo.SIDEDATA_REQUIREMENT
365
365
366 default = False
366 default = False
367
367
368 description = _(
368 description = _(
369 b'Allows storage of extra data alongside a revision, '
369 b'Allows storage of extra data alongside a revision, '
370 b'unlocking various caching options.'
370 b'unlocking various caching options.'
371 )
371 )
372
372
373 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
373 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
374
374
375
375
376 @registerformatvariant
376 @registerformatvariant
377 class copiessdc(requirementformatvariant):
377 class copiessdc(requirementformatvariant):
378 name = b'copies-sdc'
378 name = b'copies-sdc'
379
379
380 _requirement = localrepo.COPIESSDC_REQUIREMENT
380 _requirement = localrepo.COPIESSDC_REQUIREMENT
381
381
382 default = False
382 default = False
383
383
384 description = _(b'Stores copies information alongside changesets.')
384 description = _(b'Stores copies information alongside changesets.')
385
385
386 upgrademessage = _(
386 upgrademessage = _(
387 b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
387 b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
388 )
388 )
389
389
390
390
391 @registerformatvariant
391 @registerformatvariant
392 class removecldeltachain(formatvariant):
392 class removecldeltachain(formatvariant):
393 name = b'plain-cl-delta'
393 name = b'plain-cl-delta'
394
394
395 default = True
395 default = True
396
396
397 description = _(
397 description = _(
398 b'changelog storage is using deltas instead of '
398 b'changelog storage is using deltas instead of '
399 b'raw entries; changelog reading and any '
399 b'raw entries; changelog reading and any '
400 b'operation relying on changelog data are slower '
400 b'operation relying on changelog data are slower '
401 b'than they could be'
401 b'than they could be'
402 )
402 )
403
403
404 upgrademessage = _(
404 upgrademessage = _(
405 b'changelog storage will be reformated to '
405 b'changelog storage will be reformated to '
406 b'store raw entries; changelog reading will be '
406 b'store raw entries; changelog reading will be '
407 b'faster; changelog size may be reduced'
407 b'faster; changelog size may be reduced'
408 )
408 )
409
409
410 @staticmethod
410 @staticmethod
411 def fromrepo(repo):
411 def fromrepo(repo):
412 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
412 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
413 # changelogs with deltas.
413 # changelogs with deltas.
414 cl = repo.changelog
414 cl = repo.changelog
415 chainbase = cl.chainbase
415 chainbase = cl.chainbase
416 return all(rev == chainbase(rev) for rev in cl)
416 return all(rev == chainbase(rev) for rev in cl)
417
417
418 @staticmethod
418 @staticmethod
419 def fromconfig(repo):
419 def fromconfig(repo):
420 return True
420 return True
421
421
422
422
423 @registerformatvariant
423 @registerformatvariant
424 class compressionengine(formatvariant):
424 class compressionengine(formatvariant):
425 name = b'compression'
425 name = b'compression'
426 default = b'zlib'
426 default = b'zlib'
427
427
428 description = _(
428 description = _(
429 b'Compresion algorithm used to compress data. '
429 b'Compresion algorithm used to compress data. '
430 b'Some engine are faster than other'
430 b'Some engine are faster than other'
431 )
431 )
432
432
433 upgrademessage = _(
433 upgrademessage = _(
434 b'revlog content will be recompressed with the new algorithm.'
434 b'revlog content will be recompressed with the new algorithm.'
435 )
435 )
436
436
437 @classmethod
437 @classmethod
438 def fromrepo(cls, repo):
438 def fromrepo(cls, repo):
439 # we allow multiple compression engine requirement to co-exist because
439 # we allow multiple compression engine requirement to co-exist because
440 # strickly speaking, revlog seems to support mixed compression style.
440 # strickly speaking, revlog seems to support mixed compression style.
441 #
441 #
442 # The compression used for new entries will be "the last one"
442 # The compression used for new entries will be "the last one"
443 compression = b'zlib'
443 compression = b'zlib'
444 for req in repo.requirements:
444 for req in repo.requirements:
445 prefix = req.startswith
445 prefix = req.startswith
446 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
446 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
447 compression = req.split(b'-', 2)[2]
447 compression = req.split(b'-', 2)[2]
448 return compression
448 return compression
449
449
450 @classmethod
450 @classmethod
451 def fromconfig(cls, repo):
451 def fromconfig(cls, repo):
452 compengines = repo.ui.configlist(b'format', b'revlog-compression')
452 compengines = repo.ui.configlist(b'format', b'revlog-compression')
453 # return the first valid value as the selection code would do
453 # return the first valid value as the selection code would do
454 for comp in compengines:
454 for comp in compengines:
455 if comp in util.compengines:
455 if comp in util.compengines:
456 return comp
456 return comp
457
457
458 # no valide compression found lets display it all for clarity
458 # no valide compression found lets display it all for clarity
459 return b','.join(compengines)
459 return b','.join(compengines)
460
460
461
461
462 @registerformatvariant
462 @registerformatvariant
463 class compressionlevel(formatvariant):
463 class compressionlevel(formatvariant):
464 name = b'compression-level'
464 name = b'compression-level'
465 default = b'default'
465 default = b'default'
466
466
467 description = _(b'compression level')
467 description = _(b'compression level')
468
468
469 upgrademessage = _(b'revlog content will be recompressed')
469 upgrademessage = _(b'revlog content will be recompressed')
470
470
471 @classmethod
471 @classmethod
472 def fromrepo(cls, repo):
472 def fromrepo(cls, repo):
473 comp = compressionengine.fromrepo(repo)
473 comp = compressionengine.fromrepo(repo)
474 level = None
474 level = None
475 if comp == b'zlib':
475 if comp == b'zlib':
476 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
476 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
477 elif comp == b'zstd':
477 elif comp == b'zstd':
478 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
478 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
479 if level is None:
479 if level is None:
480 return b'default'
480 return b'default'
481 return bytes(level)
481 return bytes(level)
482
482
483 @classmethod
483 @classmethod
484 def fromconfig(cls, repo):
484 def fromconfig(cls, repo):
485 comp = compressionengine.fromconfig(repo)
485 comp = compressionengine.fromconfig(repo)
486 level = None
486 level = None
487 if comp == b'zlib':
487 if comp == b'zlib':
488 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
488 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
489 elif comp == b'zstd':
489 elif comp == b'zstd':
490 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
490 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
491 if level is None:
491 if level is None:
492 return b'default'
492 return b'default'
493 return bytes(level)
493 return bytes(level)
494
494
495
495
496 def finddeficiencies(repo):
496 def finddeficiencies(repo):
497 """returns a list of deficiencies that the repo suffer from"""
497 """returns a list of deficiencies that the repo suffer from"""
498 deficiencies = []
498 deficiencies = []
499
499
500 # We could detect lack of revlogv1 and store here, but they were added
500 # We could detect lack of revlogv1 and store here, but they were added
501 # in 0.9.2 and we don't support upgrading repos without these
501 # in 0.9.2 and we don't support upgrading repos without these
502 # requirements, so let's not bother.
502 # requirements, so let's not bother.
503
503
504 for fv in allformatvariant:
504 for fv in allformatvariant:
505 if not fv.fromrepo(repo):
505 if not fv.fromrepo(repo):
506 deficiencies.append(fv)
506 deficiencies.append(fv)
507
507
508 return deficiencies
508 return deficiencies
509
509
510
510
511 # search without '-' to support older form on newer client.
511 # search without '-' to support older form on newer client.
512 #
512 #
513 # We don't enforce backward compatibility for debug command so this
513 # We don't enforce backward compatibility for debug command so this
514 # might eventually be dropped. However, having to use two different
514 # might eventually be dropped. However, having to use two different
515 # forms in script when comparing result is anoying enough to add
515 # forms in script when comparing result is anoying enough to add
516 # backward compatibility for a while.
516 # backward compatibility for a while.
517 legacy_opts_map = {
517 legacy_opts_map = {
518 b'redeltaparent': b're-delta-parent',
518 b'redeltaparent': b're-delta-parent',
519 b'redeltamultibase': b're-delta-multibase',
519 b'redeltamultibase': b're-delta-multibase',
520 b'redeltaall': b're-delta-all',
520 b'redeltaall': b're-delta-all',
521 b'redeltafulladd': b're-delta-fulladd',
521 b'redeltafulladd': b're-delta-fulladd',
522 }
522 }
523
523
524
524
525 def findoptimizations(repo):
525 def findoptimizations(repo):
526 """Determine optimisation that could be used during upgrade"""
526 """Determine optimisation that could be used during upgrade"""
527 # These are unconditionally added. There is logic later that figures out
527 # These are unconditionally added. There is logic later that figures out
528 # which ones to apply.
528 # which ones to apply.
529 optimizations = []
529 optimizations = []
530
530
531 optimizations.append(
531 optimizations.append(
532 improvement(
532 improvement(
533 name=b're-delta-parent',
533 name=b're-delta-parent',
534 type=optimisation,
534 type=optimisation,
535 description=_(
535 description=_(
536 b'deltas within internal storage will be recalculated to '
536 b'deltas within internal storage will be recalculated to '
537 b'choose an optimal base revision where this was not '
537 b'choose an optimal base revision where this was not '
538 b'already done; the size of the repository may shrink and '
538 b'already done; the size of the repository may shrink and '
539 b'various operations may become faster; the first time '
539 b'various operations may become faster; the first time '
540 b'this optimization is performed could slow down upgrade '
540 b'this optimization is performed could slow down upgrade '
541 b'execution considerably; subsequent invocations should '
541 b'execution considerably; subsequent invocations should '
542 b'not run noticeably slower'
542 b'not run noticeably slower'
543 ),
543 ),
544 upgrademessage=_(
544 upgrademessage=_(
545 b'deltas within internal storage will choose a new '
545 b'deltas within internal storage will choose a new '
546 b'base revision if needed'
546 b'base revision if needed'
547 ),
547 ),
548 )
548 )
549 )
549 )
550
550
551 optimizations.append(
551 optimizations.append(
552 improvement(
552 improvement(
553 name=b're-delta-multibase',
553 name=b're-delta-multibase',
554 type=optimisation,
554 type=optimisation,
555 description=_(
555 description=_(
556 b'deltas within internal storage will be recalculated '
556 b'deltas within internal storage will be recalculated '
557 b'against multiple base revision and the smallest '
557 b'against multiple base revision and the smallest '
558 b'difference will be used; the size of the repository may '
558 b'difference will be used; the size of the repository may '
559 b'shrink significantly when there are many merges; this '
559 b'shrink significantly when there are many merges; this '
560 b'optimization will slow down execution in proportion to '
560 b'optimization will slow down execution in proportion to '
561 b'the number of merges in the repository and the amount '
561 b'the number of merges in the repository and the amount '
562 b'of files in the repository; this slow down should not '
562 b'of files in the repository; this slow down should not '
563 b'be significant unless there are tens of thousands of '
563 b'be significant unless there are tens of thousands of '
564 b'files and thousands of merges'
564 b'files and thousands of merges'
565 ),
565 ),
566 upgrademessage=_(
566 upgrademessage=_(
567 b'deltas within internal storage will choose an '
567 b'deltas within internal storage will choose an '
568 b'optimal delta by computing deltas against multiple '
568 b'optimal delta by computing deltas against multiple '
569 b'parents; may slow down execution time '
569 b'parents; may slow down execution time '
570 b'significantly'
570 b'significantly'
571 ),
571 ),
572 )
572 )
573 )
573 )
574
574
575 optimizations.append(
575 optimizations.append(
576 improvement(
576 improvement(
577 name=b're-delta-all',
577 name=b're-delta-all',
578 type=optimisation,
578 type=optimisation,
579 description=_(
579 description=_(
580 b'deltas within internal storage will always be '
580 b'deltas within internal storage will always be '
581 b'recalculated without reusing prior deltas; this will '
581 b'recalculated without reusing prior deltas; this will '
582 b'likely make execution run several times slower; this '
582 b'likely make execution run several times slower; this '
583 b'optimization is typically not needed'
583 b'optimization is typically not needed'
584 ),
584 ),
585 upgrademessage=_(
585 upgrademessage=_(
586 b'deltas within internal storage will be fully '
586 b'deltas within internal storage will be fully '
587 b'recomputed; this will likely drastically slow down '
587 b'recomputed; this will likely drastically slow down '
588 b'execution time'
588 b'execution time'
589 ),
589 ),
590 )
590 )
591 )
591 )
592
592
593 optimizations.append(
593 optimizations.append(
594 improvement(
594 improvement(
595 name=b're-delta-fulladd',
595 name=b're-delta-fulladd',
596 type=optimisation,
596 type=optimisation,
597 description=_(
597 description=_(
598 b'every revision will be re-added as if it was new '
598 b'every revision will be re-added as if it was new '
599 b'content. It will go through the full storage '
599 b'content. It will go through the full storage '
600 b'mechanism giving extensions a chance to process it '
600 b'mechanism giving extensions a chance to process it '
601 b'(eg. lfs). This is similar to "re-delta-all" but even '
601 b'(eg. lfs). This is similar to "re-delta-all" but even '
602 b'slower since more logic is involved.'
602 b'slower since more logic is involved.'
603 ),
603 ),
604 upgrademessage=_(
604 upgrademessage=_(
605 b'each revision will be added as new content to the '
605 b'each revision will be added as new content to the '
606 b'internal storage; this will likely drastically slow '
606 b'internal storage; this will likely drastically slow '
607 b'down execution time, but some extensions might need '
607 b'down execution time, but some extensions might need '
608 b'it'
608 b'it'
609 ),
609 ),
610 )
610 )
611 )
611 )
612
612
613 return optimizations
613 return optimizations
614
614
615
615
616 def determineactions(repo, deficiencies, sourcereqs, destreqs):
616 def determineactions(repo, deficiencies, sourcereqs, destreqs):
617 """Determine upgrade actions that will be performed.
617 """Determine upgrade actions that will be performed.
618
618
619 Given a list of improvements as returned by ``finddeficiencies`` and
619 Given a list of improvements as returned by ``finddeficiencies`` and
620 ``findoptimizations``, determine the list of upgrade actions that
620 ``findoptimizations``, determine the list of upgrade actions that
621 will be performed.
621 will be performed.
622
622
623 The role of this function is to filter improvements if needed, apply
623 The role of this function is to filter improvements if needed, apply
624 recommended optimizations from the improvements list that make sense,
624 recommended optimizations from the improvements list that make sense,
625 etc.
625 etc.
626
626
627 Returns a list of action names.
627 Returns a list of action names.
628 """
628 """
629 newactions = []
629 newactions = []
630
630
- 631 knownreqs = supporteddestrequirements(repo)
- 632
633 for d in deficiencies:
631 for d in deficiencies:
- 634 name = d.name
+ 632 name = d._requirement
635
633
636 # If the action is a requirement that doesn't show up in the
634 # If the action is a requirement that doesn't show up in the
637 # destination requirements, prune the action.
635 # destination requirements, prune the action.
- 638 if name in knownreqs and name not in destreqs:
+ 636 if name is not None and name not in destreqs:
639 continue
637 continue
640
638
641 newactions.append(d)
639 newactions.append(d)
642
640
643 # FUTURE consider adding some optimizations here for certain transitions.
641 # FUTURE consider adding some optimizations here for certain transitions.
644 # e.g. adding generaldelta could schedule parent redeltas.
642 # e.g. adding generaldelta could schedule parent redeltas.
645
643
646 return newactions
644 return newactions
647
645
648
646
649 def _revlogfrompath(repo, path):
647 def _revlogfrompath(repo, path):
650 """Obtain a revlog from a repo path.
648 """Obtain a revlog from a repo path.
651
649
652 An instance of the appropriate class is returned.
650 An instance of the appropriate class is returned.
653 """
651 """
654 if path == b'00changelog.i':
652 if path == b'00changelog.i':
655 return changelog.changelog(repo.svfs)
653 return changelog.changelog(repo.svfs)
656 elif path.endswith(b'00manifest.i'):
654 elif path.endswith(b'00manifest.i'):
657 mandir = path[: -len(b'00manifest.i')]
655 mandir = path[: -len(b'00manifest.i')]
658 return manifest.manifestrevlog(repo.svfs, tree=mandir)
656 return manifest.manifestrevlog(repo.svfs, tree=mandir)
659 else:
657 else:
660 # reverse of "/".join(("data", path + ".i"))
658 # reverse of "/".join(("data", path + ".i"))
661 return filelog.filelog(repo.svfs, path[5:-2])
659 return filelog.filelog(repo.svfs, path[5:-2])
662
660
663
661
664 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
662 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
665 """copy all relevant files for `oldrl` into `destrepo` store
663 """copy all relevant files for `oldrl` into `destrepo` store
666
664
667 Files are copied "as is" without any transformation. The copy is performed
665 Files are copied "as is" without any transformation. The copy is performed
668 without extra checks. Callers are responsible for making sure the copied
666 without extra checks. Callers are responsible for making sure the copied
669 content is compatible with format of the destination repository.
667 content is compatible with format of the destination repository.
670 """
668 """
671 oldrl = getattr(oldrl, '_revlog', oldrl)
669 oldrl = getattr(oldrl, '_revlog', oldrl)
672 newrl = _revlogfrompath(destrepo, unencodedname)
670 newrl = _revlogfrompath(destrepo, unencodedname)
673 newrl = getattr(newrl, '_revlog', newrl)
671 newrl = getattr(newrl, '_revlog', newrl)
674
672
675 oldvfs = oldrl.opener
673 oldvfs = oldrl.opener
676 newvfs = newrl.opener
674 newvfs = newrl.opener
677 oldindex = oldvfs.join(oldrl.indexfile)
675 oldindex = oldvfs.join(oldrl.indexfile)
678 newindex = newvfs.join(newrl.indexfile)
676 newindex = newvfs.join(newrl.indexfile)
679 olddata = oldvfs.join(oldrl.datafile)
677 olddata = oldvfs.join(oldrl.datafile)
680 newdata = newvfs.join(newrl.datafile)
678 newdata = newvfs.join(newrl.datafile)
681
679
682 with newvfs(newrl.indexfile, b'w'):
680 with newvfs(newrl.indexfile, b'w'):
683 pass # create all the directories
681 pass # create all the directories
684
682
685 util.copyfile(oldindex, newindex)
683 util.copyfile(oldindex, newindex)
686 copydata = oldrl.opener.exists(oldrl.datafile)
684 copydata = oldrl.opener.exists(oldrl.datafile)
687 if copydata:
685 if copydata:
688 util.copyfile(olddata, newdata)
686 util.copyfile(olddata, newdata)
689
687
690 if not (
688 if not (
691 unencodedname.endswith(b'00changelog.i')
689 unencodedname.endswith(b'00changelog.i')
692 or unencodedname.endswith(b'00manifest.i')
690 or unencodedname.endswith(b'00manifest.i')
693 ):
691 ):
694 destrepo.svfs.fncache.add(unencodedname)
692 destrepo.svfs.fncache.add(unencodedname)
695 if copydata:
693 if copydata:
696 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
694 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
697
695
698
696
699 UPGRADE_CHANGELOG = object()
697 UPGRADE_CHANGELOG = object()
700 UPGRADE_MANIFEST = object()
698 UPGRADE_MANIFEST = object()
701 UPGRADE_FILELOG = object()
699 UPGRADE_FILELOG = object()
702
700
703 UPGRADE_ALL_REVLOGS = frozenset(
701 UPGRADE_ALL_REVLOGS = frozenset(
704 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG]
702 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG]
705 )
703 )
706
704
707
705
708 def getsidedatacompanion(srcrepo, dstrepo):
706 def getsidedatacompanion(srcrepo, dstrepo):
709 sidedatacompanion = None
707 sidedatacompanion = None
710 removedreqs = srcrepo.requirements - dstrepo.requirements
708 removedreqs = srcrepo.requirements - dstrepo.requirements
711 addedreqs = dstrepo.requirements - srcrepo.requirements
709 addedreqs = dstrepo.requirements - srcrepo.requirements
712 if localrepo.SIDEDATA_REQUIREMENT in removedreqs:
710 if localrepo.SIDEDATA_REQUIREMENT in removedreqs:
713
711
714 def sidedatacompanion(rl, rev):
712 def sidedatacompanion(rl, rev):
715 rl = getattr(rl, '_revlog', rl)
713 rl = getattr(rl, '_revlog', rl)
716 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
714 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
717 return True, (), {}
715 return True, (), {}
718 return False, (), {}
716 return False, (), {}
719
717
720 elif localrepo.COPIESSDC_REQUIREMENT in addedreqs:
718 elif localrepo.COPIESSDC_REQUIREMENT in addedreqs:
721 sidedatacompanion = copies.getsidedataadder(srcrepo, dstrepo)
719 sidedatacompanion = copies.getsidedataadder(srcrepo, dstrepo)
722 elif localrepo.COPIESSDC_REQUIREMENT in removedreqs:
720 elif localrepo.COPIESSDC_REQUIREMENT in removedreqs:
723 sidedatacompanion = copies.getsidedataremover(srcrepo, dstrepo)
721 sidedatacompanion = copies.getsidedataremover(srcrepo, dstrepo)
724 return sidedatacompanion
722 return sidedatacompanion
725
723
726
724
727 def matchrevlog(revlogfilter, entry):
725 def matchrevlog(revlogfilter, entry):
728 """check is a revlog is selected for cloning
726 """check is a revlog is selected for cloning
729
727
730 The store entry is checked against the passed filter"""
728 The store entry is checked against the passed filter"""
731 if entry.endswith(b'00changelog.i'):
729 if entry.endswith(b'00changelog.i'):
732 return UPGRADE_CHANGELOG in revlogfilter
730 return UPGRADE_CHANGELOG in revlogfilter
733 elif entry.endswith(b'00manifest.i'):
731 elif entry.endswith(b'00manifest.i'):
734 return UPGRADE_MANIFEST in revlogfilter
732 return UPGRADE_MANIFEST in revlogfilter
735 return UPGRADE_FILELOG in revlogfilter
733 return UPGRADE_FILELOG in revlogfilter
736
734
737
735
738 def _clonerevlogs(
736 def _clonerevlogs(
739 ui,
737 ui,
740 srcrepo,
738 srcrepo,
741 dstrepo,
739 dstrepo,
742 tr,
740 tr,
743 deltareuse,
741 deltareuse,
744 forcedeltabothparents,
742 forcedeltabothparents,
745 revlogs=UPGRADE_ALL_REVLOGS,
743 revlogs=UPGRADE_ALL_REVLOGS,
746 ):
744 ):
747 """Copy revlogs between 2 repos."""
745 """Copy revlogs between 2 repos."""
748 revcount = 0
746 revcount = 0
749 srcsize = 0
747 srcsize = 0
750 srcrawsize = 0
748 srcrawsize = 0
751 dstsize = 0
749 dstsize = 0
752 fcount = 0
750 fcount = 0
753 frevcount = 0
751 frevcount = 0
754 fsrcsize = 0
752 fsrcsize = 0
755 frawsize = 0
753 frawsize = 0
756 fdstsize = 0
754 fdstsize = 0
757 mcount = 0
755 mcount = 0
758 mrevcount = 0
756 mrevcount = 0
759 msrcsize = 0
757 msrcsize = 0
760 mrawsize = 0
758 mrawsize = 0
761 mdstsize = 0
759 mdstsize = 0
762 crevcount = 0
760 crevcount = 0
763 csrcsize = 0
761 csrcsize = 0
764 crawsize = 0
762 crawsize = 0
765 cdstsize = 0
763 cdstsize = 0
766
764
767 alldatafiles = list(srcrepo.store.walk())
765 alldatafiles = list(srcrepo.store.walk())
768
766
769 # Perform a pass to collect metadata. This validates we can open all
767 # Perform a pass to collect metadata. This validates we can open all
770 # source files and allows a unified progress bar to be displayed.
768 # source files and allows a unified progress bar to be displayed.
771 for unencoded, encoded, size in alldatafiles:
769 for unencoded, encoded, size in alldatafiles:
772 if unencoded.endswith(b'.d'):
770 if unencoded.endswith(b'.d'):
773 continue
771 continue
774
772
775 rl = _revlogfrompath(srcrepo, unencoded)
773 rl = _revlogfrompath(srcrepo, unencoded)
776
774
777 info = rl.storageinfo(
775 info = rl.storageinfo(
778 exclusivefiles=True,
776 exclusivefiles=True,
779 revisionscount=True,
777 revisionscount=True,
780 trackedsize=True,
778 trackedsize=True,
781 storedsize=True,
779 storedsize=True,
782 )
780 )
783
781
784 revcount += info[b'revisionscount'] or 0
782 revcount += info[b'revisionscount'] or 0
785 datasize = info[b'storedsize'] or 0
783 datasize = info[b'storedsize'] or 0
786 rawsize = info[b'trackedsize'] or 0
784 rawsize = info[b'trackedsize'] or 0
787
785
788 srcsize += datasize
786 srcsize += datasize
789 srcrawsize += rawsize
787 srcrawsize += rawsize
790
788
791 # This is for the separate progress bars.
789 # This is for the separate progress bars.
792 if isinstance(rl, changelog.changelog):
790 if isinstance(rl, changelog.changelog):
793 crevcount += len(rl)
791 crevcount += len(rl)
794 csrcsize += datasize
792 csrcsize += datasize
795 crawsize += rawsize
793 crawsize += rawsize
796 elif isinstance(rl, manifest.manifestrevlog):
794 elif isinstance(rl, manifest.manifestrevlog):
797 mcount += 1
795 mcount += 1
798 mrevcount += len(rl)
796 mrevcount += len(rl)
799 msrcsize += datasize
797 msrcsize += datasize
800 mrawsize += rawsize
798 mrawsize += rawsize
801 elif isinstance(rl, filelog.filelog):
799 elif isinstance(rl, filelog.filelog):
802 fcount += 1
800 fcount += 1
803 frevcount += len(rl)
801 frevcount += len(rl)
804 fsrcsize += datasize
802 fsrcsize += datasize
805 frawsize += rawsize
803 frawsize += rawsize
806 else:
804 else:
807 error.ProgrammingError(b'unknown revlog type')
805 error.ProgrammingError(b'unknown revlog type')
808
806
809 if not revcount:
807 if not revcount:
810 return
808 return
811
809
812 ui.write(
810 ui.write(
813 _(
811 _(
814 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
812 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
815 b'%d in changelog)\n'
813 b'%d in changelog)\n'
816 )
814 )
817 % (revcount, frevcount, mrevcount, crevcount)
815 % (revcount, frevcount, mrevcount, crevcount)
818 )
816 )
819 ui.write(
817 ui.write(
820 _(b'migrating %s in store; %s tracked data\n')
818 _(b'migrating %s in store; %s tracked data\n')
821 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
819 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
822 )
820 )
823
821
824 # Used to keep track of progress.
822 # Used to keep track of progress.
825 progress = None
823 progress = None
826
824
827 def oncopiedrevision(rl, rev, node):
825 def oncopiedrevision(rl, rev, node):
828 progress.increment()
826 progress.increment()
829
827
830 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
828 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
831
829
832 # Do the actual copying.
830 # Do the actual copying.
833 # FUTURE this operation can be farmed off to worker processes.
831 # FUTURE this operation can be farmed off to worker processes.
834 seen = set()
832 seen = set()
835 for unencoded, encoded, size in alldatafiles:
833 for unencoded, encoded, size in alldatafiles:
836 if unencoded.endswith(b'.d'):
834 if unencoded.endswith(b'.d'):
837 continue
835 continue
838
836
839 oldrl = _revlogfrompath(srcrepo, unencoded)
837 oldrl = _revlogfrompath(srcrepo, unencoded)
840
838
841 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
839 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
842 ui.write(
840 ui.write(
843 _(
841 _(
844 b'finished migrating %d manifest revisions across %d '
842 b'finished migrating %d manifest revisions across %d '
845 b'manifests; change in size: %s\n'
843 b'manifests; change in size: %s\n'
846 )
844 )
847 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
845 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
848 )
846 )
849
847
850 ui.write(
848 ui.write(
851 _(
849 _(
852 b'migrating changelog containing %d revisions '
850 b'migrating changelog containing %d revisions '
853 b'(%s in store; %s tracked data)\n'
851 b'(%s in store; %s tracked data)\n'
854 )
852 )
855 % (
853 % (
856 crevcount,
854 crevcount,
857 util.bytecount(csrcsize),
855 util.bytecount(csrcsize),
858 util.bytecount(crawsize),
856 util.bytecount(crawsize),
859 )
857 )
860 )
858 )
861 seen.add(b'c')
859 seen.add(b'c')
862 progress = srcrepo.ui.makeprogress(
860 progress = srcrepo.ui.makeprogress(
863 _(b'changelog revisions'), total=crevcount
861 _(b'changelog revisions'), total=crevcount
864 )
862 )
865 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
863 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
866 ui.write(
864 ui.write(
867 _(
865 _(
868 b'finished migrating %d filelog revisions across %d '
866 b'finished migrating %d filelog revisions across %d '
869 b'filelogs; change in size: %s\n'
867 b'filelogs; change in size: %s\n'
870 )
868 )
871 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
869 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
872 )
870 )
873
871
874 ui.write(
872 ui.write(
875 _(
873 _(
876 b'migrating %d manifests containing %d revisions '
874 b'migrating %d manifests containing %d revisions '
877 b'(%s in store; %s tracked data)\n'
875 b'(%s in store; %s tracked data)\n'
878 )
876 )
879 % (
877 % (
880 mcount,
878 mcount,
881 mrevcount,
879 mrevcount,
882 util.bytecount(msrcsize),
880 util.bytecount(msrcsize),
883 util.bytecount(mrawsize),
881 util.bytecount(mrawsize),
884 )
882 )
885 )
883 )
886 seen.add(b'm')
884 seen.add(b'm')
887 if progress:
885 if progress:
888 progress.complete()
886 progress.complete()
889 progress = srcrepo.ui.makeprogress(
887 progress = srcrepo.ui.makeprogress(
890 _(b'manifest revisions'), total=mrevcount
888 _(b'manifest revisions'), total=mrevcount
891 )
889 )
892 elif b'f' not in seen:
890 elif b'f' not in seen:
893 ui.write(
891 ui.write(
894 _(
892 _(
895 b'migrating %d filelogs containing %d revisions '
893 b'migrating %d filelogs containing %d revisions '
896 b'(%s in store; %s tracked data)\n'
894 b'(%s in store; %s tracked data)\n'
897 )
895 )
898 % (
896 % (
899 fcount,
897 fcount,
900 frevcount,
898 frevcount,
901 util.bytecount(fsrcsize),
899 util.bytecount(fsrcsize),
902 util.bytecount(frawsize),
900 util.bytecount(frawsize),
903 )
901 )
904 )
902 )
905 seen.add(b'f')
903 seen.add(b'f')
906 if progress:
904 if progress:
907 progress.complete()
905 progress.complete()
908 progress = srcrepo.ui.makeprogress(
906 progress = srcrepo.ui.makeprogress(
909 _(b'file revisions'), total=frevcount
907 _(b'file revisions'), total=frevcount
910 )
908 )
911
909
912 if matchrevlog(revlogs, unencoded):
910 if matchrevlog(revlogs, unencoded):
913 ui.note(
911 ui.note(
914 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
912 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
915 )
913 )
916 newrl = _revlogfrompath(dstrepo, unencoded)
914 newrl = _revlogfrompath(dstrepo, unencoded)
917 oldrl.clone(
915 oldrl.clone(
918 tr,
916 tr,
919 newrl,
917 newrl,
920 addrevisioncb=oncopiedrevision,
918 addrevisioncb=oncopiedrevision,
921 deltareuse=deltareuse,
919 deltareuse=deltareuse,
922 forcedeltabothparents=forcedeltabothparents,
920 forcedeltabothparents=forcedeltabothparents,
923 sidedatacompanion=sidedatacompanion,
921 sidedatacompanion=sidedatacompanion,
924 )
922 )
925 else:
923 else:
926 msg = _(b'blindly copying %s containing %i revisions\n')
924 msg = _(b'blindly copying %s containing %i revisions\n')
927 ui.note(msg % (unencoded, len(oldrl)))
925 ui.note(msg % (unencoded, len(oldrl)))
928 _copyrevlog(tr, dstrepo, oldrl, unencoded)
926 _copyrevlog(tr, dstrepo, oldrl, unencoded)
929
927
930 newrl = _revlogfrompath(dstrepo, unencoded)
928 newrl = _revlogfrompath(dstrepo, unencoded)
931
929
932 info = newrl.storageinfo(storedsize=True)
930 info = newrl.storageinfo(storedsize=True)
933 datasize = info[b'storedsize'] or 0
931 datasize = info[b'storedsize'] or 0
934
932
935 dstsize += datasize
933 dstsize += datasize
936
934
937 if isinstance(newrl, changelog.changelog):
935 if isinstance(newrl, changelog.changelog):
938 cdstsize += datasize
936 cdstsize += datasize
939 elif isinstance(newrl, manifest.manifestrevlog):
937 elif isinstance(newrl, manifest.manifestrevlog):
940 mdstsize += datasize
938 mdstsize += datasize
941 else:
939 else:
942 fdstsize += datasize
940 fdstsize += datasize
943
941
944 progress.complete()
942 progress.complete()
945
943
946 ui.write(
944 ui.write(
947 _(
945 _(
948 b'finished migrating %d changelog revisions; change in size: '
946 b'finished migrating %d changelog revisions; change in size: '
949 b'%s\n'
947 b'%s\n'
950 )
948 )
951 % (crevcount, util.bytecount(cdstsize - csrcsize))
949 % (crevcount, util.bytecount(cdstsize - csrcsize))
952 )
950 )
953
951
954 ui.write(
952 ui.write(
955 _(
953 _(
956 b'finished migrating %d total revisions; total change in store '
954 b'finished migrating %d total revisions; total change in store '
957 b'size: %s\n'
955 b'size: %s\n'
958 )
956 )
959 % (revcount, util.bytecount(dstsize - srcsize))
957 % (revcount, util.bytecount(dstsize - srcsize))
960 )
958 )
961
959
962
960
963 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
961 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
964 """Determine whether to copy a store file during upgrade.
962 """Determine whether to copy a store file during upgrade.
965
963
966 This function is called when migrating store files from ``srcrepo`` to
964 This function is called when migrating store files from ``srcrepo`` to
967 ``dstrepo`` as part of upgrading a repository.
965 ``dstrepo`` as part of upgrading a repository.
968
966
969 Args:
967 Args:
970 srcrepo: repo we are copying from
968 srcrepo: repo we are copying from
971 dstrepo: repo we are copying to
969 dstrepo: repo we are copying to
972 requirements: set of requirements for ``dstrepo``
970 requirements: set of requirements for ``dstrepo``
973 path: store file being examined
971 path: store file being examined
974 mode: the ``ST_MODE`` file type of ``path``
972 mode: the ``ST_MODE`` file type of ``path``
975 st: ``stat`` data structure for ``path``
973 st: ``stat`` data structure for ``path``
976
974
977 Function should return ``True`` if the file is to be copied.
975 Function should return ``True`` if the file is to be copied.
978 """
976 """
979 # Skip revlogs.
977 # Skip revlogs.
980 if path.endswith((b'.i', b'.d')):
978 if path.endswith((b'.i', b'.d')):
981 return False
979 return False
982 # Skip transaction related files.
980 # Skip transaction related files.
983 if path.startswith(b'undo'):
981 if path.startswith(b'undo'):
984 return False
982 return False
985 # Only copy regular files.
983 # Only copy regular files.
986 if mode != stat.S_IFREG:
984 if mode != stat.S_IFREG:
987 return False
985 return False
988 # Skip other files that should not be copied.
986 # Skip other files that should not be copied.
989 if path in (b'lock', b'fncache'):
987 if path in (b'lock', b'fncache'):
990 return False
988 return False
991
989
992 return True
990 return True
993
991
994
992
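# Editorial sketch (not part of upgrade.py): exercising the rules above on a
# few typical store entries.  The helper below is illustrative only and is
# never called by the upgrade machinery; the repo arguments are unused by the
# current rules, so ``None`` stands in for them.
def _examplefilterstorefile():
    samples = [
        (b'00changelog.i', False),    # revlog index: recreated by _clonerevlogs
        (b'data/foo.txt.d', False),   # revlog data file: same reason
        (b'undo.backupfiles', False), # transaction leftover
        (b'fncache', False),          # rebuilt for the destination store
        (b'phaseroots', True),        # plain metadata file, copied verbatim
    ]
    for path, expected in samples:
        got = _filterstorefile(None, None, set(), path, stat.S_IFREG, None)
        assert got == expected, path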
995 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
993 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
996 """Hook point for extensions to perform additional actions during upgrade.
994 """Hook point for extensions to perform additional actions during upgrade.
997
995
998 This function is called after revlogs and store files have been copied but
996 This function is called after revlogs and store files have been copied but
999 before the new store is swapped into the original location.
997 before the new store is swapped into the original location.
1000 """
998 """
1001
999
1002
1000
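# Editorial sketch (not part of upgrade.py): an extension could hook into this
# point roughly as follows, from its own module.  ``myext-cache`` and the
# function names are hypothetical.
#
#     from mercurial import extensions, upgrade
#
#     def _copymyextcache(orig, ui, srcrepo, dstrepo, requirements):
#         # carry an extension-specific store file over to the new store
#         data = srcrepo.svfs.tryread(b'myext-cache')
#         if data:
#             dstrepo.svfs.write(b'myext-cache', data)
#         return orig(ui, srcrepo, dstrepo, requirements)
#
#     def extsetup(ui):
#         extensions.wrapfunction(upgrade, '_finishdatamigration', _copymyextcache)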
1003 def _upgraderepo(
1001 def _upgraderepo(
1004 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1002 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1005 ):
1003 ):
1006 """Do the low-level work of upgrading a repository.
1004 """Do the low-level work of upgrading a repository.
1007
1005
1008 The upgrade is effectively performed as a copy between a source
1006 The upgrade is effectively performed as a copy between a source
1009 repository and a temporary destination repository.
1007 repository and a temporary destination repository.
1010
1008
1011 The source repository is unmodified for as long as possible so the
1009 The source repository is unmodified for as long as possible so the
1012 upgrade can abort at any time without causing loss of service for
1010 upgrade can abort at any time without causing loss of service for
1013 readers and without corrupting the source repository.
1011 readers and without corrupting the source repository.
1014 """
1012 """
1015 assert srcrepo.currentwlock()
1013 assert srcrepo.currentwlock()
1016 assert dstrepo.currentwlock()
1014 assert dstrepo.currentwlock()
1017
1015
1018 ui.write(
1016 ui.write(
1019 _(
1017 _(
1020 b'(it is safe to interrupt this process any time before '
1018 b'(it is safe to interrupt this process any time before '
1021 b'data migration completes)\n'
1019 b'data migration completes)\n'
1022 )
1020 )
1023 )
1021 )
1024
1022
1025 if b're-delta-all' in actions:
1023 if b're-delta-all' in actions:
1026 deltareuse = revlog.revlog.DELTAREUSENEVER
1024 deltareuse = revlog.revlog.DELTAREUSENEVER
1027 elif b're-delta-parent' in actions:
1025 elif b're-delta-parent' in actions:
1028 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1026 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1029 elif b're-delta-multibase' in actions:
1027 elif b're-delta-multibase' in actions:
1030 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1028 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1031 elif b're-delta-fulladd' in actions:
1029 elif b're-delta-fulladd' in actions:
1032 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1030 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1033 else:
1031 else:
1034 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1032 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1035
1033
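    # Editorial summary of the mapping above (descriptive note only):
    #
    #   requested action     delta reuse policy    effect when cloning revlogs
    #   re-delta-all         DELTAREUSENEVER       recompute every delta from scratch
    #   re-delta-parent      DELTAREUSESAMEREVS    pick a new delta base where needed
    #   re-delta-multibase   DELTAREUSESAMEREVS    as above, and also try deltas
    #                                              against both parents of merges
    #                                              (the boolean passed to
    #                                              _clonerevlogs below)
    #   re-delta-fulladd     DELTAREUSEFULLADD     re-add each revision as if it
    #                                              were new content
    #   (none requested)     DELTAREUSEALWAYS      reuse existing deltas whenever
    #                                              possible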
1036 with dstrepo.transaction(b'upgrade') as tr:
1034 with dstrepo.transaction(b'upgrade') as tr:
1037 _clonerevlogs(
1035 _clonerevlogs(
1038 ui,
1036 ui,
1039 srcrepo,
1037 srcrepo,
1040 dstrepo,
1038 dstrepo,
1041 tr,
1039 tr,
1042 deltareuse,
1040 deltareuse,
1043 b're-delta-multibase' in actions,
1041 b're-delta-multibase' in actions,
1044 revlogs=revlogs,
1042 revlogs=revlogs,
1045 )
1043 )
1046
1044
1047 # Now copy other files in the store directory.
1045 # Now copy other files in the store directory.
1048 # The sorted() makes execution deterministic.
1046 # The sorted() makes execution deterministic.
1049 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1047 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1050 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1048 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1051 continue
1049 continue
1052
1050
1053 srcrepo.ui.write(_(b'copying %s\n') % p)
1051 srcrepo.ui.write(_(b'copying %s\n') % p)
1054 src = srcrepo.store.rawvfs.join(p)
1052 src = srcrepo.store.rawvfs.join(p)
1055 dst = dstrepo.store.rawvfs.join(p)
1053 dst = dstrepo.store.rawvfs.join(p)
1056 util.copyfile(src, dst, copystat=True)
1054 util.copyfile(src, dst, copystat=True)
1057
1055
1058 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1056 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1059
1057
1060 ui.write(_(b'data fully migrated to temporary repository\n'))
1058 ui.write(_(b'data fully migrated to temporary repository\n'))
1061
1059
1062 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1060 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1063 backupvfs = vfsmod.vfs(backuppath)
1061 backupvfs = vfsmod.vfs(backuppath)
1064
1062
1065 # Make a backup of requires file first, as it is the first to be modified.
1063 # Make a backup of requires file first, as it is the first to be modified.
1066 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1064 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1067
1065
1068 # We install an arbitrary requirement that no client supports
1066 # We install an arbitrary requirement that no client supports
1069 # as a mechanism to lock out new clients during the data swap. This is
1067 # as a mechanism to lock out new clients during the data swap. This is
1070 # better than allowing a client to continue while the repository is in
1068 # better than allowing a client to continue while the repository is in
1071 # an inconsistent state.
1069 # an inconsistent state.
1072 ui.write(
1070 ui.write(
1073 _(
1071 _(
1074 b'marking source repository as being upgraded; clients will be '
1072 b'marking source repository as being upgraded; clients will be '
1075 b'unable to read from repository\n'
1073 b'unable to read from repository\n'
1076 )
1074 )
1077 )
1075 )
1078 scmutil.writerequires(
1076 scmutil.writerequires(
1079 srcrepo.vfs, srcrepo.requirements | {b'upgradeinprogress'}
1077 srcrepo.vfs, srcrepo.requirements | {b'upgradeinprogress'}
1080 )
1078 )
1081
1079
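    # For example, a repository whose ``.hg/requires`` normally contains::
    #
    #     dotencode
    #     fncache
    #     generaldelta
    #     revlogv1
    #     sparserevlog
    #     store
    #
    # temporarily gains an extra ``upgradeinprogress`` line here.  No client
    # recognizes that requirement, so any attempt to open the repository aborts
    # until the final requirements are written further down.  (Editorial
    # illustration; the exact requirement list varies per repository.)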
1082 ui.write(_(b'starting in-place swap of repository data\n'))
1080 ui.write(_(b'starting in-place swap of repository data\n'))
1083 ui.write(_(b'replaced files will be backed up at %s\n') % backuppath)
1081 ui.write(_(b'replaced files will be backed up at %s\n') % backuppath)
1084
1082
1085 # Now swap in the new store directory. Doing it as a rename should make
1083 # Now swap in the new store directory. Doing it as a rename should make
1086 # the operation nearly instantaneous and atomic (at least in well-behaved
1084 # the operation nearly instantaneous and atomic (at least in well-behaved
1087 # environments).
1085 # environments).
1088 ui.write(_(b'replacing store...\n'))
1086 ui.write(_(b'replacing store...\n'))
1089 tstart = util.timer()
1087 tstart = util.timer()
1090 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1088 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1091 util.rename(dstrepo.spath, srcrepo.spath)
1089 util.rename(dstrepo.spath, srcrepo.spath)
1092 elapsed = util.timer() - tstart
1090 elapsed = util.timer() - tstart
1093 ui.write(
1091 ui.write(
1094 _(
1092 _(
1095 b'store replacement complete; repository was inconsistent for '
1093 b'store replacement complete; repository was inconsistent for '
1096 b'%0.1fs\n'
1094 b'%0.1fs\n'
1097 )
1095 )
1098 % elapsed
1096 % elapsed
1099 )
1097 )
1100
1098
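    # At this point the backup directory returned below holds the previous
    # ``requires`` file and the entire old store, laid out roughly as::
    #
    #     .hg/upgradebackup.XXXXXXXX/requires
    #     .hg/upgradebackup.XXXXXXXX/store/00changelog.i
    #     .hg/upgradebackup.XXXXXXXX/store/...
    #
    # (editorial illustration; the directory suffix is random and the store
    # contents depend on the repository)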
1101 # We first write the requirements file. Any new requirements will lock
1099 # We first write the requirements file. Any new requirements will lock
1102 # out legacy clients.
1100 # out legacy clients.
1103 ui.write(
1101 ui.write(
1104 _(
1102 _(
1105 b'finalizing requirements file and making repository readable '
1103 b'finalizing requirements file and making repository readable '
1106 b'again\n'
1104 b'again\n'
1107 )
1105 )
1108 )
1106 )
1109 scmutil.writerequires(srcrepo.vfs, requirements)
1107 scmutil.writerequires(srcrepo.vfs, requirements)
1110
1108
1111 # The lock file from the old store won't be removed because nothing has a
1109 # The lock file from the old store won't be removed because nothing has a
1112 # reference to its new location. So clean it up manually. Alternatively, we
1110 # reference to its new location. So clean it up manually. Alternatively, we
1113 # could update srcrepo.svfs and other variables to point to the new
1111 # could update srcrepo.svfs and other variables to point to the new
1114 # location. This is simpler.
1112 # location. This is simpler.
1115 backupvfs.unlink(b'store/lock')
1113 backupvfs.unlink(b'store/lock')
1116
1114
1117 return backuppath
1115 return backuppath
1118
1116
1119
1117
1120 def upgraderepo(
1118 def upgraderepo(
1121 ui,
1119 ui,
1122 repo,
1120 repo,
1123 run=False,
1121 run=False,
1124 optimize=None,
1122 optimize=None,
1125 backup=True,
1123 backup=True,
1126 manifest=None,
1124 manifest=None,
1127 changelog=None,
1125 changelog=None,
1128 ):
1126 ):
1129 """Upgrade a repository in place."""
1127 """Upgrade a repository in place."""
1130 if optimize is None:
1128 if optimize is None:
1131 optimize = []
1129 optimize = []
1132 optimize = {legacy_opts_map.get(o, o) for o in optimize}
1130 optimize = {legacy_opts_map.get(o, o) for o in optimize}
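    # The line above folds older optimization spellings into their current
    # names; e.g. a legacy ``redeltaall`` request is treated as ``re-delta-all``
    # before validation below.  (Editorial note; the exact aliases live in
    # ``legacy_opts_map`` earlier in this module.)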
1133 repo = repo.unfiltered()
1131 repo = repo.unfiltered()
1134
1132
1135 revlogs = set(UPGRADE_ALL_REVLOGS)
1133 revlogs = set(UPGRADE_ALL_REVLOGS)
1136 specentries = ((b'c', changelog), (b'm', manifest))
1134 specentries = ((b'c', changelog), (b'm', manifest))
1137 specified = [(y, x) for (y, x) in specentries if x is not None]
1135 specified = [(y, x) for (y, x) in specentries if x is not None]
1138 if specified:
1136 if specified:
1139 # we have some limitations on which revlogs can be recloned
1137 # we have some limitations on which revlogs can be recloned
1140 if any(x for y, x in specified):
1138 if any(x for y, x in specified):
1141 revlogs = set()
1139 revlogs = set()
1142 for r, enabled in specified:
1140 for r, enabled in specified:
1143 if enabled:
1141 if enabled:
1144 if r == b'c':
1142 if r == b'c':
1145 revlogs.add(UPGRADE_CHANGELOG)
1143 revlogs.add(UPGRADE_CHANGELOG)
1146 elif r == b'm':
1144 elif r == b'm':
1147 revlogs.add(UPGRADE_MANIFEST)
1145 revlogs.add(UPGRADE_MANIFEST)
1148 else:
1146 else:
1149 # none are enabled
1147 # none are enabled
1150 for r, __ in specified:
1148 for r, __ in specified:
1151 if r == b'c':
1149 if r == b'c':
1152 revlogs.discard(UPGRADE_CHANGELOG)
1150 revlogs.discard(UPGRADE_CHANGELOG)
1153 elif r == b'm':
1151 elif r == b'm':
1154 revlogs.discard(UPGRADE_MANIFEST)
1152 revlogs.discard(UPGRADE_MANIFEST)
1155
1153
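    # Editorial illustration of the selection above, assuming
    # UPGRADE_ALL_REVLOGS covers the changelog, the manifest log and the
    # filelogs (as defined earlier in this module), and using the
    # debugupgraderepo flags that feed the ``changelog``/``manifest``
    # arguments:
    #
    #   (no flag)                      -> upgrade every revlog
    #   --changelog                    -> changelog only
    #   --manifest                     -> manifest only
    #   --changelog --manifest         -> changelog and manifest
    #   --no-changelog                 -> everything except the changelog
    #   --no-changelog --no-manifest   -> filelogs only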
1156 # Ensure the repository can be upgraded.
1154 # Ensure the repository can be upgraded.
1157 missingreqs = requiredsourcerequirements(repo) - repo.requirements
1155 missingreqs = requiredsourcerequirements(repo) - repo.requirements
1158 if missingreqs:
1156 if missingreqs:
1159 raise error.Abort(
1157 raise error.Abort(
1160 _(b'cannot upgrade repository; requirement missing: %s')
1158 _(b'cannot upgrade repository; requirement missing: %s')
1161 % _(b', ').join(sorted(missingreqs))
1159 % _(b', ').join(sorted(missingreqs))
1162 )
1160 )
1163
1161
1164 blockedreqs = blocksourcerequirements(repo) & repo.requirements
1162 blockedreqs = blocksourcerequirements(repo) & repo.requirements
1165 if blockedreqs:
1163 if blockedreqs:
1166 raise error.Abort(
1164 raise error.Abort(
1167 _(
1165 _(
1168 b'cannot upgrade repository; unsupported source '
1166 b'cannot upgrade repository; unsupported source '
1169 b'requirement: %s'
1167 b'requirement: %s'
1170 )
1168 )
1171 % _(b', ').join(sorted(blockedreqs))
1169 % _(b', ').join(sorted(blockedreqs))
1172 )
1170 )
1173
1171
1174 # FUTURE there is potentially a need to control the wanted requirements via
1172 # FUTURE there is potentially a need to control the wanted requirements via
1175 # command arguments or via an extension hook point.
1173 # command arguments or via an extension hook point.
1176 newreqs = localrepo.newreporequirements(
1174 newreqs = localrepo.newreporequirements(
1177 repo.ui, localrepo.defaultcreateopts(repo.ui)
1175 repo.ui, localrepo.defaultcreateopts(repo.ui)
1178 )
1176 )
1179 newreqs.update(preservedrequirements(repo))
1177 newreqs.update(preservedrequirements(repo))
1180
1178
1181 noremovereqs = (
1179 noremovereqs = (
1182 repo.requirements - newreqs - supportremovedrequirements(repo)
1180 repo.requirements - newreqs - supportremovedrequirements(repo)
1183 )
1181 )
1184 if noremovereqs:
1182 if noremovereqs:
1185 raise error.Abort(
1183 raise error.Abort(
1186 _(
1184 _(
1187 b'cannot upgrade repository; requirement would be '
1185 b'cannot upgrade repository; requirement would be '
1188 b'removed: %s'
1186 b'removed: %s'
1189 )
1187 )
1190 % _(b', ').join(sorted(noremovereqs))
1188 % _(b', ').join(sorted(noremovereqs))
1191 )
1189 )
1192
1190
1193 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
1191 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
1194 if noaddreqs:
1192 if noaddreqs:
1195 raise error.Abort(
1193 raise error.Abort(
1196 _(
1194 _(
1197 b'cannot upgrade repository; do not support adding '
1195 b'cannot upgrade repository; do not support adding '
1198 b'requirement: %s'
1196 b'requirement: %s'
1199 )
1197 )
1200 % _(b', ').join(sorted(noaddreqs))
1198 % _(b', ').join(sorted(noaddreqs))
1201 )
1199 )
1202
1200
1203 unsupportedreqs = newreqs - supporteddestrequirements(repo)
1201 unsupportedreqs = newreqs - supporteddestrequirements(repo)
1204 if unsupportedreqs:
1202 if unsupportedreqs:
1205 raise error.Abort(
1203 raise error.Abort(
1206 _(
1204 _(
1207 b'cannot upgrade repository; do not support '
1205 b'cannot upgrade repository; do not support '
1208 b'destination requirement: %s'
1206 b'destination requirement: %s'
1209 )
1207 )
1210 % _(b', ').join(sorted(unsupportedreqs))
1208 % _(b', ').join(sorted(unsupportedreqs))
1211 )
1209 )
1212
1210
1213 # Find and validate all improvements that can be made.
1211 # Find and validate all improvements that can be made.
1214 alloptimizations = findoptimizations(repo)
1212 alloptimizations = findoptimizations(repo)
1215
1213
1216 # Apply and validate arguments.
1214 # Apply and validate arguments.
1217 optimizations = []
1215 optimizations = []
1218 for o in alloptimizations:
1216 for o in alloptimizations:
1219 if o.name in optimize:
1217 if o.name in optimize:
1220 optimizations.append(o)
1218 optimizations.append(o)
1221 optimize.discard(o.name)
1219 optimize.discard(o.name)
1222
1220
1223 if optimize: # anything left is unknown
1221 if optimize: # anything left is unknown
1224 raise error.Abort(
1222 raise error.Abort(
1225 _(b'unknown optimization action requested: %s')
1223 _(b'unknown optimization action requested: %s')
1226 % b', '.join(sorted(optimize)),
1224 % b', '.join(sorted(optimize)),
1227 hint=_(b'run without arguments to see valid optimizations'),
1225 hint=_(b'run without arguments to see valid optimizations'),
1228 )
1226 )
1229
1227
1230 deficiencies = finddeficiencies(repo)
1228 deficiencies = finddeficiencies(repo)
1231 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
1229 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
1232 actions.extend(
1230 actions.extend(
1233 o
1231 o
1234 for o in sorted(optimizations)
1232 for o in sorted(optimizations)
1235 # determineactions could have added some optimizations already
1233 # determineactions could have added some optimizations already
1236 if o not in actions
1234 if o not in actions
1237 )
1235 )
1238
1236
1239 removedreqs = repo.requirements - newreqs
1237 removedreqs = repo.requirements - newreqs
1240 addedreqs = newreqs - repo.requirements
1238 addedreqs = newreqs - repo.requirements
1241
1239
1242 if revlogs != UPGRADE_ALL_REVLOGS:
1240 if revlogs != UPGRADE_ALL_REVLOGS:
1243 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
1241 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
1244 if incompatible:
1242 if incompatible:
1245 msg = _(
1243 msg = _(
1246 b'ignoring revlogs selection flags, format requirements '
1244 b'ignoring revlogs selection flags, format requirements '
1247 b'change: %s\n'
1245 b'change: %s\n'
1248 )
1246 )
1249 ui.warn(msg % b', '.join(sorted(incompatible)))
1247 ui.warn(msg % b', '.join(sorted(incompatible)))
1250 revlogs = UPGRADE_ALL_REVLOGS
1248 revlogs = UPGRADE_ALL_REVLOGS
1251
1249
1252 def write_labeled(l, label):
1250 def write_labeled(l, label):
1253 first = True
1251 first = True
1254 for r in sorted(l):
1252 for r in sorted(l):
1255 if not first:
1253 if not first:
1256 ui.write(b', ')
1254 ui.write(b', ')
1257 ui.write(r, label=label)
1255 ui.write(r, label=label)
1258 first = False
1256 first = False
1259
1257
1260 def printrequirements():
1258 def printrequirements():
1261 ui.write(_(b'requirements\n'))
1259 ui.write(_(b'requirements\n'))
1262 ui.write(_(b' preserved: '))
1260 ui.write(_(b' preserved: '))
1263 write_labeled(
1261 write_labeled(
1264 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
1262 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
1265 )
1263 )
1266 ui.write((b'\n'))
1264 ui.write((b'\n'))
1267 removed = repo.requirements - newreqs
1265 removed = repo.requirements - newreqs
1268 if removed:
1266 if removed:
1269 ui.write(_(b' removed: '))
1267 ui.write(_(b' removed: '))
1270 write_labeled(removed, "upgrade-repo.requirement.removed")
1268 write_labeled(removed, "upgrade-repo.requirement.removed")
1271 ui.write((b'\n'))
1269 ui.write((b'\n'))
1272 added = newreqs - repo.requirements
1270 added = newreqs - repo.requirements
1273 if added:
1271 if added:
1274 ui.write(_(b' added: '))
1272 ui.write(_(b' added: '))
1275 write_labeled(added, "upgrade-repo.requirement.added")
1273 write_labeled(added, "upgrade-repo.requirement.added")
1276 ui.write((b'\n'))
1274 ui.write((b'\n'))
1277 ui.write(b'\n')
1275 ui.write(b'\n')
1278
1276
1279 def printupgradeactions():
1277 def printupgradeactions():
1280 for a in actions:
1278 for a in actions:
1281 ui.write(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
1279 ui.write(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
1282
1280
1283 if not run:
1281 if not run:
1284 fromconfig = []
1282 fromconfig = []
1285 onlydefault = []
1283 onlydefault = []
1286
1284
1287 for d in deficiencies:
1285 for d in deficiencies:
1288 if d.fromconfig(repo):
1286 if d.fromconfig(repo):
1289 fromconfig.append(d)
1287 fromconfig.append(d)
1290 elif d.default:
1288 elif d.default:
1291 onlydefault.append(d)
1289 onlydefault.append(d)
1292
1290
1293 if fromconfig or onlydefault:
1291 if fromconfig or onlydefault:
1294
1292
1295 if fromconfig:
1293 if fromconfig:
1296 ui.write(
1294 ui.write(
1297 _(
1295 _(
1298 b'repository lacks features recommended by '
1296 b'repository lacks features recommended by '
1299 b'current config options:\n\n'
1297 b'current config options:\n\n'
1300 )
1298 )
1301 )
1299 )
1302 for i in fromconfig:
1300 for i in fromconfig:
1303 ui.write(b'%s\n %s\n\n' % (i.name, i.description))
1301 ui.write(b'%s\n %s\n\n' % (i.name, i.description))
1304
1302
1305 if onlydefault:
1303 if onlydefault:
1306 ui.write(
1304 ui.write(
1307 _(
1305 _(
1308 b'repository lacks features used by the default '
1306 b'repository lacks features used by the default '
1309 b'config options:\n\n'
1307 b'config options:\n\n'
1310 )
1308 )
1311 )
1309 )
1312 for i in onlydefault:
1310 for i in onlydefault:
1313 ui.write(b'%s\n %s\n\n' % (i.name, i.description))
1311 ui.write(b'%s\n %s\n\n' % (i.name, i.description))
1314
1312
1315 ui.write(b'\n')
1313 ui.write(b'\n')
1316 else:
1314 else:
1317 ui.write(
1315 ui.write(
1318 _(
1316 _(
1319 b'(no feature deficiencies found in existing '
1317 b'(no feature deficiencies found in existing '
1320 b'repository)\n'
1318 b'repository)\n'
1321 )
1319 )
1322 )
1320 )
1323
1321
1324 ui.write(
1322 ui.write(
1325 _(
1323 _(
1326 b'performing an upgrade with "--run" will make the following '
1324 b'performing an upgrade with "--run" will make the following '
1327 b'changes:\n\n'
1325 b'changes:\n\n'
1328 )
1326 )
1329 )
1327 )
1330
1328
1331 printrequirements()
1329 printrequirements()
1332 printupgradeactions()
1330 printupgradeactions()
1333
1331
1334 unusedoptimize = [i for i in alloptimizations if i not in actions]
1332 unusedoptimize = [i for i in alloptimizations if i not in actions]
1335
1333
1336 if unusedoptimize:
1334 if unusedoptimize:
1337 ui.write(
1335 ui.write(
1338 _(
1336 _(
1339 b'additional optimizations are available by specifying '
1337 b'additional optimizations are available by specifying '
1340 b'"--optimize <name>":\n\n'
1338 b'"--optimize <name>":\n\n'
1341 )
1339 )
1342 )
1340 )
1343 for i in unusedoptimize:
1341 for i in unusedoptimize:
1344 ui.write(_(b'%s\n %s\n\n') % (i.name, i.description))
1342 ui.write(_(b'%s\n %s\n\n') % (i.name, i.description))
1345 return
1343 return
1346
1344
1347 # Else we're in the run=true case.
1345 # Else we're in the run=true case.
1348 ui.write(_(b'upgrade will perform the following actions:\n\n'))
1346 ui.write(_(b'upgrade will perform the following actions:\n\n'))
1349 printrequirements()
1347 printrequirements()
1350 printupgradeactions()
1348 printupgradeactions()
1351
1349
1352 upgradeactions = [a.name for a in actions]
1350 upgradeactions = [a.name for a in actions]
1353
1351
1354 ui.write(_(b'beginning upgrade...\n'))
1352 ui.write(_(b'beginning upgrade...\n'))
1355 with repo.wlock(), repo.lock():
1353 with repo.wlock(), repo.lock():
1356 ui.write(_(b'repository locked and read-only\n'))
1354 ui.write(_(b'repository locked and read-only\n'))
1357 # Our strategy for upgrading the repository is to create a new,
1355 # Our strategy for upgrading the repository is to create a new,
1358 # temporary repository, write data to it, then do a swap of the
1356 # temporary repository, write data to it, then do a swap of the
1359 # data. There are less heavyweight ways to do this, but it is easier
1357 # data. There are less heavyweight ways to do this, but it is easier
1360 # to create a new repo object than to instantiate all the components
1358 # to create a new repo object than to instantiate all the components
1361 # (like the store) separately.
1359 # (like the store) separately.
1362 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
1360 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
1363 backuppath = None
1361 backuppath = None
1364 try:
1362 try:
1365 ui.write(
1363 ui.write(
1366 _(
1364 _(
1367 b'creating temporary repository to stage migrated '
1365 b'creating temporary repository to stage migrated '
1368 b'data: %s\n'
1366 b'data: %s\n'
1369 )
1367 )
1370 % tmppath
1368 % tmppath
1371 )
1369 )
1372
1370
1373 # clone ui without using ui.copy because repo.ui is protected
1371 # clone ui without using ui.copy because repo.ui is protected
1374 repoui = repo.ui.__class__(repo.ui)
1372 repoui = repo.ui.__class__(repo.ui)
1375 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1373 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1376
1374
1377 with dstrepo.wlock(), dstrepo.lock():
1375 with dstrepo.wlock(), dstrepo.lock():
1378 backuppath = _upgraderepo(
1376 backuppath = _upgraderepo(
1379 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
1377 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
1380 )
1378 )
1381 if not (backup or backuppath is None):
1379 if not (backup or backuppath is None):
1382 ui.write(_(b'removing old repository content %s\n') % backuppath)
1380 ui.write(_(b'removing old repository content %s\n') % backuppath)
1383 repo.vfs.rmtree(backuppath, forcibly=True)
1381 repo.vfs.rmtree(backuppath, forcibly=True)
1384 backuppath = None
1382 backuppath = None
1385
1383
1386 finally:
1384 finally:
1387 ui.write(_(b'removing temporary repository %s\n') % tmppath)
1385 ui.write(_(b'removing temporary repository %s\n') % tmppath)
1388 repo.vfs.rmtree(tmppath, forcibly=True)
1386 repo.vfs.rmtree(tmppath, forcibly=True)
1389
1387
1390 if backuppath:
1388 if backuppath:
1391 ui.warn(
1389 ui.warn(
1392 _(b'copy of old repository backed up at %s\n') % backuppath
1390 _(b'copy of old repository backed up at %s\n') % backuppath
1393 )
1391 )
1394 ui.warn(
1392 ui.warn(
1395 _(
1393 _(
1396 b'the old repository will not be deleted; remove '
1394 b'the old repository will not be deleted; remove '
1397 b'it to free up disk space once the upgraded '
1395 b'it to free up disk space once the upgraded '
1398 b'repository is verified\n'
1396 b'repository is verified\n'
1399 )
1397 )
1400 )
1398 )
@@ -1,721 +1,715
1 #testcases lfsremote-on lfsremote-off
1 #testcases lfsremote-on lfsremote-off
2 #require serve no-reposimplestore no-chg
2 #require serve no-reposimplestore no-chg
3
3
4 This test splits `hg serve` with and without using the extension into separate
4 This test splits `hg serve` with and without using the extension into separate
5 test cases. The tests are broken down as follows, where "LFS"/"No-LFS"
5 test cases. The tests are broken down as follows, where "LFS"/"No-LFS"
6 indicates whether or not there are commits that use an LFS file, and "D"/"E"
6 indicates whether or not there are commits that use an LFS file, and "D"/"E"
7 indicates whether or not the extension is loaded. The "X" cases are not tested
7 indicates whether or not the extension is loaded. The "X" cases are not tested
8 individually, because the lfs requirement causes the process to bail early if
8 individually, because the lfs requirement causes the process to bail early if
9 the extension is disabled.
9 the extension is disabled.
10
10
11 . Server
11 . Server
12 .
12 .
13 . No-LFS LFS
13 . No-LFS LFS
14 . +----------------------------+
14 . +----------------------------+
15 . | || D | E | D | E |
15 . | || D | E | D | E |
16 . |---++=======================|
16 . |---++=======================|
17 . C | D || N/A | #1 | X | #4 |
17 . C | D || N/A | #1 | X | #4 |
18 . l No +---++-----------------------|
18 . l No +---++-----------------------|
19 . i LFS | E || #2 | #2 | X | #5 |
19 . i LFS | E || #2 | #2 | X | #5 |
20 . e +---++-----------------------|
20 . e +---++-----------------------|
21 . n | D || X | X | X | X |
21 . n | D || X | X | X | X |
22 . t LFS |---++-----------------------|
22 . t LFS |---++-----------------------|
23 . | E || #3 | #3 | X | #6 |
23 . | E || #3 | #3 | X | #6 |
24 . |---++-----------------------+
24 . |---++-----------------------+
25
25
26 make command server magic visible
26 make command server magic visible
27
27
28 #if windows
28 #if windows
29 $ PYTHONPATH="$TESTDIR/../contrib;$PYTHONPATH"
29 $ PYTHONPATH="$TESTDIR/../contrib;$PYTHONPATH"
30 #else
30 #else
31 $ PYTHONPATH="$TESTDIR/../contrib:$PYTHONPATH"
31 $ PYTHONPATH="$TESTDIR/../contrib:$PYTHONPATH"
32 #endif
32 #endif
33 $ export PYTHONPATH
33 $ export PYTHONPATH
34
34
35 $ hg init server
35 $ hg init server
36 $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
36 $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
37
37
38 $ cat > $TESTTMP/debugprocessors.py <<EOF
38 $ cat > $TESTTMP/debugprocessors.py <<EOF
39 > from mercurial import (
39 > from mercurial import (
40 > cmdutil,
40 > cmdutil,
41 > commands,
41 > commands,
42 > pycompat,
42 > pycompat,
43 > registrar,
43 > registrar,
44 > )
44 > )
45 > cmdtable = {}
45 > cmdtable = {}
46 > command = registrar.command(cmdtable)
46 > command = registrar.command(cmdtable)
47 > @command(b'debugprocessors', [], b'FILE')
47 > @command(b'debugprocessors', [], b'FILE')
48 > def debugprocessors(ui, repo, file_=None, **opts):
48 > def debugprocessors(ui, repo, file_=None, **opts):
49 > opts = pycompat.byteskwargs(opts)
49 > opts = pycompat.byteskwargs(opts)
50 > opts[b'changelog'] = False
50 > opts[b'changelog'] = False
51 > opts[b'manifest'] = False
51 > opts[b'manifest'] = False
52 > opts[b'dir'] = False
52 > opts[b'dir'] = False
53 > rl = cmdutil.openrevlog(repo, b'debugprocessors', file_, opts)
53 > rl = cmdutil.openrevlog(repo, b'debugprocessors', file_, opts)
54 > for flag, proc in rl._flagprocessors.items():
54 > for flag, proc in rl._flagprocessors.items():
55 > ui.status(b"registered processor '%#x'\n" % (flag))
55 > ui.status(b"registered processor '%#x'\n" % (flag))
56 > EOF
56 > EOF
57
57
58 Skip the experimental.changegroup3=True config. Failure to agree on this comes
58 Skip the experimental.changegroup3=True config. Failure to agree on this comes
59 first, and causes an "abort: no common changegroup version" if the extension is
59 first, and causes an "abort: no common changegroup version" if the extension is
60 only loaded on one side. If that *is* enabled, the subsequent failure is "abort:
60 only loaded on one side. If that *is* enabled, the subsequent failure is "abort:
61 missing processor for flag '0x2000'!" if the extension is only loaded on one side
61 missing processor for flag '0x2000'!" if the extension is only loaded on one side
62 (possibly also masked by the Internal Server Error message).
62 (possibly also masked by the Internal Server Error message).
63 $ cat >> $HGRCPATH <<EOF
63 $ cat >> $HGRCPATH <<EOF
64 > [extensions]
64 > [extensions]
65 > debugprocessors = $TESTTMP/debugprocessors.py
65 > debugprocessors = $TESTTMP/debugprocessors.py
66 > [experimental]
66 > [experimental]
67 > lfs.disableusercache = True
67 > lfs.disableusercache = True
68 > lfs.worker-enable = False
68 > lfs.worker-enable = False
69 > [lfs]
69 > [lfs]
70 > threshold=10
70 > threshold=10
71 > [web]
71 > [web]
72 > allow_push=*
72 > allow_push=*
73 > push_ssl=False
73 > push_ssl=False
74 > EOF
74 > EOF
75
75
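(Note: with `threshold=10` above, any file larger than 10 bytes is committed as
an lfs blob; the 23-byte `lfs.bin` added in Case #3 below is what first
introduces the `lfs` requirement, while the short `non-lfs` files stay in
normal revlog storage.)
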
76 $ cp $HGRCPATH $HGRCPATH.orig
76 $ cp $HGRCPATH $HGRCPATH.orig
77
77
78 #if lfsremote-on
78 #if lfsremote-on
79 $ hg --config extensions.lfs= -R server \
79 $ hg --config extensions.lfs= -R server \
80 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
80 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
81 #else
81 #else
82 $ hg --config extensions.lfs=! -R server \
82 $ hg --config extensions.lfs=! -R server \
83 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
83 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
84 #endif
84 #endif
85
85
86 $ cat hg.pid >> $DAEMON_PIDS
86 $ cat hg.pid >> $DAEMON_PIDS
87 $ hg clone -q http://localhost:$HGPORT client
87 $ hg clone -q http://localhost:$HGPORT client
88 $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
88 $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
89 [1]
89 [1]
90
90
91 This trivial repo will force commandserver to load the extension, but not call
91 This trivial repo will force commandserver to load the extension, but not call
92 reposetup() on another repo actually being operated on. This gives coverage
92 reposetup() on another repo actually being operated on. This gives coverage
93 that wrapper functions are not assuming reposetup() was called.
93 that wrapper functions are not assuming reposetup() was called.
94
94
95 $ hg init $TESTTMP/cmdservelfs
95 $ hg init $TESTTMP/cmdservelfs
96 $ cat >> $TESTTMP/cmdservelfs/.hg/hgrc << EOF
96 $ cat >> $TESTTMP/cmdservelfs/.hg/hgrc << EOF
97 > [extensions]
97 > [extensions]
98 > lfs =
98 > lfs =
99 > EOF
99 > EOF
100
100
101 --------------------------------------------------------------------------------
101 --------------------------------------------------------------------------------
102 Case #1: client with non-lfs content and the extension disabled; server with
102 Case #1: client with non-lfs content and the extension disabled; server with
103 non-lfs content, and the extension enabled.
103 non-lfs content, and the extension enabled.
104
104
105 $ cd client
105 $ cd client
106 $ echo 'non-lfs' > nonlfs.txt
106 $ echo 'non-lfs' > nonlfs.txt
107 >>> from __future__ import absolute_import
107 >>> from __future__ import absolute_import
108 >>> from hgclient import check, readchannel, runcommand
108 >>> from hgclient import check, readchannel, runcommand
109 >>> @check
109 >>> @check
110 ... def diff(server):
110 ... def diff(server):
111 ... readchannel(server)
111 ... readchannel(server)
112 ... # run an arbitrary command in the repo with the extension loaded
112 ... # run an arbitrary command in the repo with the extension loaded
113 ... runcommand(server, [b'id', b'-R', b'../cmdservelfs'])
113 ... runcommand(server, [b'id', b'-R', b'../cmdservelfs'])
114 ... # now run a command in a repo without the extension to ensure that
114 ... # now run a command in a repo without the extension to ensure that
115 ... # files are added safely..
115 ... # files are added safely..
116 ... runcommand(server, [b'ci', b'-Aqm', b'non-lfs'])
116 ... runcommand(server, [b'ci', b'-Aqm', b'non-lfs'])
117 ... # .. and that scmutil.prefetchfiles() safely no-ops..
117 ... # .. and that scmutil.prefetchfiles() safely no-ops..
118 ... runcommand(server, [b'diff', b'-r', b'.~1'])
118 ... runcommand(server, [b'diff', b'-r', b'.~1'])
119 ... # .. and that debugupgraderepo safely no-ops.
119 ... # .. and that debugupgraderepo safely no-ops.
120 ... runcommand(server, [b'debugupgraderepo', b'-q', b'--run'])
120 ... runcommand(server, [b'debugupgraderepo', b'-q', b'--run'])
121 *** runcommand id -R ../cmdservelfs
121 *** runcommand id -R ../cmdservelfs
122 000000000000 tip
122 000000000000 tip
123 *** runcommand ci -Aqm non-lfs
123 *** runcommand ci -Aqm non-lfs
124 *** runcommand diff -r .~1
124 *** runcommand diff -r .~1
125 diff -r 000000000000 nonlfs.txt
125 diff -r 000000000000 nonlfs.txt
126 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
126 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
127 +++ b/nonlfs.txt Thu Jan 01 00:00:00 1970 +0000
127 +++ b/nonlfs.txt Thu Jan 01 00:00:00 1970 +0000
128 @@ -0,0 +1,1 @@
128 @@ -0,0 +1,1 @@
129 +non-lfs
129 +non-lfs
130 *** runcommand debugupgraderepo -q --run
130 *** runcommand debugupgraderepo -q --run
131 upgrade will perform the following actions:
131 upgrade will perform the following actions:
132
132
133 requirements
133 requirements
134 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
134 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
135
135
136 sidedata
137 Allows storage of extra data alongside a revision.
138
139 copies-sdc
140 Allows to use more efficient algorithm to deal with copy tracing.
141
142 beginning upgrade...
136 beginning upgrade...
143 repository locked and read-only
137 repository locked and read-only
144 creating temporary repository to stage migrated data: * (glob)
138 creating temporary repository to stage migrated data: * (glob)
145 (it is safe to interrupt this process any time before data migration completes)
139 (it is safe to interrupt this process any time before data migration completes)
146 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
140 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
147 migrating 324 bytes in store; 129 bytes tracked data
141 migrating 324 bytes in store; 129 bytes tracked data
148 migrating 1 filelogs containing 1 revisions (73 bytes in store; 8 bytes tracked data)
142 migrating 1 filelogs containing 1 revisions (73 bytes in store; 8 bytes tracked data)
149 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
143 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
150 migrating 1 manifests containing 1 revisions (117 bytes in store; 52 bytes tracked data)
144 migrating 1 manifests containing 1 revisions (117 bytes in store; 52 bytes tracked data)
151 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
145 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
152 migrating changelog containing 1 revisions (134 bytes in store; 69 bytes tracked data)
146 migrating changelog containing 1 revisions (134 bytes in store; 69 bytes tracked data)
153 finished migrating 1 changelog revisions; change in size: 0 bytes
147 finished migrating 1 changelog revisions; change in size: 0 bytes
154 finished migrating 3 total revisions; total change in store size: 0 bytes
148 finished migrating 3 total revisions; total change in store size: 0 bytes
155 copying phaseroots
149 copying phaseroots
156 data fully migrated to temporary repository
150 data fully migrated to temporary repository
157 marking source repository as being upgraded; clients will be unable to read from repository
151 marking source repository as being upgraded; clients will be unable to read from repository
158 starting in-place swap of repository data
152 starting in-place swap of repository data
159 replaced files will be backed up at * (glob)
153 replaced files will be backed up at * (glob)
160 replacing store...
154 replacing store...
161 store replacement complete; repository was inconsistent for *s (glob)
155 store replacement complete; repository was inconsistent for *s (glob)
162 finalizing requirements file and making repository readable again
156 finalizing requirements file and making repository readable again
163 removing temporary repository * (glob)
157 removing temporary repository * (glob)
164 copy of old repository backed up at * (glob)
158 copy of old repository backed up at * (glob)
165 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
159 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
166
160
167 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
161 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
168 [1]
162 [1]
169
163
170 #if lfsremote-on
164 #if lfsremote-on
171
165
172 $ hg push -q
166 $ hg push -q
173 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
167 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
174 [1]
168 [1]
175
169
176 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
170 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
177 $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
171 $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
178 [1]
172 [1]
179
173
180 $ hg init $TESTTMP/client1_pull
174 $ hg init $TESTTMP/client1_pull
181 $ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
175 $ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
182 $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
176 $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
183 [1]
177 [1]
184
178
185 $ hg identify http://localhost:$HGPORT
179 $ hg identify http://localhost:$HGPORT
186 d437e1d24fbd
180 d437e1d24fbd
187
181
188 #endif
182 #endif
189
183
190 --------------------------------------------------------------------------------
184 --------------------------------------------------------------------------------
191 Case #2: client with non-lfs content and the extension enabled; server with
185 Case #2: client with non-lfs content and the extension enabled; server with
192 non-lfs content, and the extension state controlled by #testcases.
186 non-lfs content, and the extension state controlled by #testcases.
193
187
194 $ cat >> $HGRCPATH <<EOF
188 $ cat >> $HGRCPATH <<EOF
195 > [extensions]
189 > [extensions]
196 > lfs =
190 > lfs =
197 > EOF
191 > EOF
198 $ echo 'non-lfs' > nonlfs2.txt
192 $ echo 'non-lfs' > nonlfs2.txt
199 $ hg ci -Aqm 'non-lfs file with lfs client'
193 $ hg ci -Aqm 'non-lfs file with lfs client'
200
194
201 Since no lfs content has been added yet, the push is allowed, even when the
195 Since no lfs content has been added yet, the push is allowed, even when the
202 extension is not enabled remotely.
196 extension is not enabled remotely.
203
197
204 $ hg push -q
198 $ hg push -q
205 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
199 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
206 [1]
200 [1]
207
201
208 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
202 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
209 $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
203 $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
210 [1]
204 [1]
211
205
212 $ hg init $TESTTMP/client2_pull
206 $ hg init $TESTTMP/client2_pull
213 $ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
207 $ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
214 $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
208 $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
215 [1]
209 [1]
216
210
217 $ hg identify http://localhost:$HGPORT
211 $ hg identify http://localhost:$HGPORT
218 1477875038c6
212 1477875038c6
219
213
220 --------------------------------------------------------------------------------
214 --------------------------------------------------------------------------------
221 Case #3: client with lfs content and the extension enabled; server with
215 Case #3: client with lfs content and the extension enabled; server with
222 non-lfs content, and the extension state controlled by #testcases. The server
216 non-lfs content, and the extension state controlled by #testcases. The server
223 should have an 'lfs' requirement after it picks up its first commit with a blob.
217 should have an 'lfs' requirement after it picks up its first commit with a blob.
224
218
225 $ echo 'this is a big lfs file' > lfs.bin
219 $ echo 'this is a big lfs file' > lfs.bin
226 $ hg ci -Aqm 'lfs'
220 $ hg ci -Aqm 'lfs'
227 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
221 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
228 .hg/requires:lfs
222 .hg/requires:lfs
229
223
230 #if lfsremote-off
224 #if lfsremote-off
231 $ hg push -q
225 $ hg push -q
232 abort: required features are not supported in the destination: lfs
226 abort: required features are not supported in the destination: lfs
233 (enable the lfs extension on the server)
227 (enable the lfs extension on the server)
234 [255]
228 [255]
235 #else
229 #else
236 $ hg push -q
230 $ hg push -q
237 #endif
231 #endif
238 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
232 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
239 .hg/requires:lfs
233 .hg/requires:lfs
240 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
234 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
241
235
242 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
236 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
243 $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
237 $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
244 $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
238 $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
245 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
239 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
246
240
247 $ hg init $TESTTMP/client3_pull
241 $ hg init $TESTTMP/client3_pull
248 $ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
242 $ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
249 $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
243 $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
250 $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
244 $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
251 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
245 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
252
246
253 Test that the commit/changegroup requirement check hook can be run multiple
247 Test that the commit/changegroup requirement check hook can be run multiple
254 times.
248 times.
255
249
256 $ hg clone -qr 0 http://localhost:$HGPORT $TESTTMP/cmdserve_client3
250 $ hg clone -qr 0 http://localhost:$HGPORT $TESTTMP/cmdserve_client3
257
251
258 $ cd ../cmdserve_client3
252 $ cd ../cmdserve_client3
259
253
260 >>> from __future__ import absolute_import
254 >>> from __future__ import absolute_import
261 >>> from hgclient import check, readchannel, runcommand
255 >>> from hgclient import check, readchannel, runcommand
262 >>> @check
256 >>> @check
263 ... def addrequirement(server):
257 ... def addrequirement(server):
264 ... readchannel(server)
258 ... readchannel(server)
265 ... # change the repo in a way that adds the lfs requirement
259 ... # change the repo in a way that adds the lfs requirement
266 ... runcommand(server, [b'pull', b'-qu'])
260 ... runcommand(server, [b'pull', b'-qu'])
267 ... # Now cause the requirement adding hook to fire again, without going
261 ... # Now cause the requirement adding hook to fire again, without going
268 ... # through reposetup() again.
262 ... # through reposetup() again.
269 ... with open('file.txt', 'wb') as fp:
263 ... with open('file.txt', 'wb') as fp:
270 ... fp.write(b'data')
264 ... fp.write(b'data')
271 ... runcommand(server, [b'ci', b'-Aqm', b'non-lfs'])
265 ... runcommand(server, [b'ci', b'-Aqm', b'non-lfs'])
272 *** runcommand pull -qu
266 *** runcommand pull -qu
273 *** runcommand ci -Aqm non-lfs
267 *** runcommand ci -Aqm non-lfs
274
268
275 $ cd ../client
269 $ cd ../client
276
270
277 The difference here is the push failed above when the extension isn't
271 The difference here is the push failed above when the extension isn't
278 enabled on the server.
272 enabled on the server.
279 $ hg identify http://localhost:$HGPORT
273 $ hg identify http://localhost:$HGPORT
280 8374dc4052cb (lfsremote-on !)
274 8374dc4052cb (lfsremote-on !)
281 1477875038c6 (lfsremote-off !)
275 1477875038c6 (lfsremote-off !)
282
276
283 Don't bother testing the lfsremote-off cases- the server won't be able
277 Don't bother testing the lfsremote-off cases- the server won't be able
284 to launch if there's lfs content and the extension is disabled.
278 to launch if there's lfs content and the extension is disabled.
285
279
286 #if lfsremote-on
280 #if lfsremote-on
287
281
288 --------------------------------------------------------------------------------
282 --------------------------------------------------------------------------------
289 Case #4: client with non-lfs content and the extension disabled; server with
283 Case #4: client with non-lfs content and the extension disabled; server with
290 lfs content, and the extension enabled.
284 lfs content, and the extension enabled.
291
285
292 $ cat >> $HGRCPATH <<EOF
286 $ cat >> $HGRCPATH <<EOF
293 > [extensions]
287 > [extensions]
294 > lfs = !
288 > lfs = !
295 > EOF
289 > EOF
296
290
297 $ hg init $TESTTMP/client4
291 $ hg init $TESTTMP/client4
298 $ cd $TESTTMP/client4
292 $ cd $TESTTMP/client4
299 $ cat >> .hg/hgrc <<EOF
293 $ cat >> .hg/hgrc <<EOF
300 > [paths]
294 > [paths]
301 > default = http://localhost:$HGPORT
295 > default = http://localhost:$HGPORT
302 > EOF
296 > EOF
303 $ echo 'non-lfs' > nonlfs2.txt
297 $ echo 'non-lfs' > nonlfs2.txt
304 $ hg ci -Aqm 'non-lfs'
298 $ hg ci -Aqm 'non-lfs'
305 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
299 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
306 $TESTTMP/server/.hg/requires:lfs
300 $TESTTMP/server/.hg/requires:lfs
307
301
308 $ hg push -q --force
302 $ hg push -q --force
309 warning: repository is unrelated
303 warning: repository is unrelated
310 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
304 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
311 $TESTTMP/server/.hg/requires:lfs
305 $TESTTMP/server/.hg/requires:lfs
312
306
313 $ hg clone http://localhost:$HGPORT $TESTTMP/client4_clone
307 $ hg clone http://localhost:$HGPORT $TESTTMP/client4_clone
314 (remote is using large file support (lfs), but it is explicitly disabled in the local configuration)
308 (remote is using large file support (lfs), but it is explicitly disabled in the local configuration)
315 abort: repository requires features unknown to this Mercurial: lfs!
309 abort: repository requires features unknown to this Mercurial: lfs!
316 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
310 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
317 [255]
311 [255]
318 $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
312 $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
319 grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
313 grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
320 $TESTTMP/server/.hg/requires:lfs
314 $TESTTMP/server/.hg/requires:lfs
321 [2]
315 [2]
322
316
323 TODO: fail more gracefully.
317 TODO: fail more gracefully.
324
318
325 $ hg init $TESTTMP/client4_pull
319 $ hg init $TESTTMP/client4_pull
326 $ hg -R $TESTTMP/client4_pull pull http://localhost:$HGPORT
320 $ hg -R $TESTTMP/client4_pull pull http://localhost:$HGPORT
327 pulling from http://localhost:$HGPORT/
321 pulling from http://localhost:$HGPORT/
328 requesting all changes
322 requesting all changes
329 remote: abort: no common changegroup version
323 remote: abort: no common changegroup version
330 abort: pull failed on remote
324 abort: pull failed on remote
331 [255]
325 [255]
332 $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
326 $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
333 $TESTTMP/server/.hg/requires:lfs
327 $TESTTMP/server/.hg/requires:lfs
334
328
335 $ hg identify http://localhost:$HGPORT
329 $ hg identify http://localhost:$HGPORT
336 03b080fa9d93
330 03b080fa9d93
337
331
338 --------------------------------------------------------------------------------
332 --------------------------------------------------------------------------------
339 Case #5: client with non-lfs content and the extension enabled; server with
333 Case #5: client with non-lfs content and the extension enabled; server with
340 lfs content, and the extension enabled.
334 lfs content, and the extension enabled.
341
335
342 $ cat >> $HGRCPATH <<EOF
336 $ cat >> $HGRCPATH <<EOF
343 > [extensions]
337 > [extensions]
344 > lfs =
338 > lfs =
345 > EOF
339 > EOF
346 $ echo 'non-lfs' > nonlfs3.txt
340 $ echo 'non-lfs' > nonlfs3.txt
347 $ hg ci -Aqm 'non-lfs file with lfs client'
341 $ hg ci -Aqm 'non-lfs file with lfs client'
348
342
349 $ hg push -q
343 $ hg push -q
350 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
344 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
351 $TESTTMP/server/.hg/requires:lfs
345 $TESTTMP/server/.hg/requires:lfs
352
346
353 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
347 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
354 $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
348 $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
355 $TESTTMP/client5_clone/.hg/requires:lfs
349 $TESTTMP/client5_clone/.hg/requires:lfs
356 $TESTTMP/server/.hg/requires:lfs
350 $TESTTMP/server/.hg/requires:lfs
357
351
358 $ hg init $TESTTMP/client5_pull
352 $ hg init $TESTTMP/client5_pull
359 $ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
353 $ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
360 $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
354 $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
361 $TESTTMP/client5_pull/.hg/requires:lfs
355 $TESTTMP/client5_pull/.hg/requires:lfs
362 $TESTTMP/server/.hg/requires:lfs
356 $TESTTMP/server/.hg/requires:lfs
363
357
364 $ hg identify http://localhost:$HGPORT
358 $ hg identify http://localhost:$HGPORT
365 c729025cc5e3
359 c729025cc5e3
366
360
367 $ mv $HGRCPATH $HGRCPATH.tmp
361 $ mv $HGRCPATH $HGRCPATH.tmp
368 $ cp $HGRCPATH.orig $HGRCPATH
362 $ cp $HGRCPATH.orig $HGRCPATH
369
363
370 >>> from __future__ import absolute_import
364 >>> from __future__ import absolute_import
371 >>> from hgclient import bprint, check, readchannel, runcommand, stdout
365 >>> from hgclient import bprint, check, readchannel, runcommand, stdout
372 >>> @check
366 >>> @check
373 ... def checkflags(server):
367 ... def checkflags(server):
374 ... readchannel(server)
368 ... readchannel(server)
375 ... bprint(b'')
369 ... bprint(b'')
376 ... bprint(b'# LFS required- both lfs and non-lfs revlogs have 0x2000 flag')
370 ... bprint(b'# LFS required- both lfs and non-lfs revlogs have 0x2000 flag')
377 ... stdout.flush()
371 ... stdout.flush()
378 ... runcommand(server, [b'debugprocessors', b'lfs.bin', b'-R',
372 ... runcommand(server, [b'debugprocessors', b'lfs.bin', b'-R',
379 ... b'../server'])
373 ... b'../server'])
380 ... runcommand(server, [b'debugprocessors', b'nonlfs2.txt', b'-R',
374 ... runcommand(server, [b'debugprocessors', b'nonlfs2.txt', b'-R',
381 ... b'../server'])
375 ... b'../server'])
382 ... runcommand(server, [b'config', b'extensions', b'--cwd',
376 ... runcommand(server, [b'config', b'extensions', b'--cwd',
383 ... b'../server'])
377 ... b'../server'])
384 ...
378 ...
385 ... bprint(b"\n# LFS not enabled- revlogs don't have 0x2000 flag")
379 ... bprint(b"\n# LFS not enabled- revlogs don't have 0x2000 flag")
386 ... stdout.flush()
380 ... stdout.flush()
387 ... runcommand(server, [b'debugprocessors', b'nonlfs3.txt'])
381 ... runcommand(server, [b'debugprocessors', b'nonlfs3.txt'])
388 ... runcommand(server, [b'config', b'extensions'])
382 ... runcommand(server, [b'config', b'extensions'])
389
383
390 # LFS required- both lfs and non-lfs revlogs have 0x2000 flag
384 # LFS required- both lfs and non-lfs revlogs have 0x2000 flag
391 *** runcommand debugprocessors lfs.bin -R ../server
385 *** runcommand debugprocessors lfs.bin -R ../server
392 registered processor '0x8000'
386 registered processor '0x8000'
393 registered processor '0x2000'
387 registered processor '0x2000'
394 *** runcommand debugprocessors nonlfs2.txt -R ../server
388 *** runcommand debugprocessors nonlfs2.txt -R ../server
395 registered processor '0x8000'
389 registered processor '0x8000'
396 registered processor '0x2000'
390 registered processor '0x2000'
397 *** runcommand config extensions --cwd ../server
391 *** runcommand config extensions --cwd ../server
398 extensions.debugprocessors=$TESTTMP/debugprocessors.py
392 extensions.debugprocessors=$TESTTMP/debugprocessors.py
399 extensions.lfs=
393 extensions.lfs=
400
394
401 # LFS not enabled- revlogs don't have 0x2000 flag
395 # LFS not enabled- revlogs don't have 0x2000 flag
402 *** runcommand debugprocessors nonlfs3.txt
396 *** runcommand debugprocessors nonlfs3.txt
403 registered processor '0x8000'
397 registered processor '0x8000'
404 *** runcommand config extensions
398 *** runcommand config extensions
405 extensions.debugprocessors=$TESTTMP/debugprocessors.py
399 extensions.debugprocessors=$TESTTMP/debugprocessors.py
406
400
407 $ rm $HGRCPATH
401 $ rm $HGRCPATH
408 $ mv $HGRCPATH.tmp $HGRCPATH
402 $ mv $HGRCPATH.tmp $HGRCPATH
409
403
410 $ hg clone $TESTTMP/client $TESTTMP/nonlfs -qr 0 --config extensions.lfs=
404 $ hg clone $TESTTMP/client $TESTTMP/nonlfs -qr 0 --config extensions.lfs=
411 $ cat >> $TESTTMP/nonlfs/.hg/hgrc <<EOF
405 $ cat >> $TESTTMP/nonlfs/.hg/hgrc <<EOF
412 > [extensions]
406 > [extensions]
413 > lfs = !
407 > lfs = !
414 > EOF
408 > EOF
415
409
416 >>> from __future__ import absolute_import, print_function
410 >>> from __future__ import absolute_import, print_function
417 >>> from hgclient import bprint, check, readchannel, runcommand, stdout
411 >>> from hgclient import bprint, check, readchannel, runcommand, stdout
418 >>> @check
412 >>> @check
419 ... def checkflags2(server):
413 ... def checkflags2(server):
420 ... readchannel(server)
414 ... readchannel(server)
421 ... bprint(b'')
415 ... bprint(b'')
422 ... bprint(b'# LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag')
416 ... bprint(b'# LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag')
423 ... stdout.flush()
417 ... stdout.flush()
424 ... runcommand(server, [b'debugprocessors', b'lfs.bin', b'-R',
418 ... runcommand(server, [b'debugprocessors', b'lfs.bin', b'-R',
425 ... b'../server'])
419 ... b'../server'])
426 ... runcommand(server, [b'debugprocessors', b'nonlfs2.txt', b'-R',
420 ... runcommand(server, [b'debugprocessors', b'nonlfs2.txt', b'-R',
427 ... b'../server'])
421 ... b'../server'])
428 ... runcommand(server, [b'config', b'extensions', b'--cwd',
422 ... runcommand(server, [b'config', b'extensions', b'--cwd',
429 ... b'../server'])
423 ... b'../server'])
430 ...
424 ...
431 ... bprint(b'\n# LFS enabled without requirement- revlogs have 0x2000 flag')
425 ... bprint(b'\n# LFS enabled without requirement- revlogs have 0x2000 flag')
432 ... stdout.flush()
426 ... stdout.flush()
433 ... runcommand(server, [b'debugprocessors', b'nonlfs3.txt'])
427 ... runcommand(server, [b'debugprocessors', b'nonlfs3.txt'])
434 ... runcommand(server, [b'config', b'extensions'])
428 ... runcommand(server, [b'config', b'extensions'])
435 ...
429 ...
436 ... bprint(b"\n# LFS disabled locally- revlogs don't have 0x2000 flag")
430 ... bprint(b"\n# LFS disabled locally- revlogs don't have 0x2000 flag")
437 ... stdout.flush()
431 ... stdout.flush()
438 ... runcommand(server, [b'debugprocessors', b'nonlfs.txt', b'-R',
432 ... runcommand(server, [b'debugprocessors', b'nonlfs.txt', b'-R',
439 ... b'../nonlfs'])
433 ... b'../nonlfs'])
440 ... runcommand(server, [b'config', b'extensions', b'--cwd',
434 ... runcommand(server, [b'config', b'extensions', b'--cwd',
441 ... b'../nonlfs'])
435 ... b'../nonlfs'])
442
436
443 # LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag
437 # LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag
444 *** runcommand debugprocessors lfs.bin -R ../server
438 *** runcommand debugprocessors lfs.bin -R ../server
445 registered processor '0x8000'
439 registered processor '0x8000'
446 registered processor '0x2000'
440 registered processor '0x2000'
447 *** runcommand debugprocessors nonlfs2.txt -R ../server
441 *** runcommand debugprocessors nonlfs2.txt -R ../server
448 registered processor '0x8000'
442 registered processor '0x8000'
449 registered processor '0x2000'
443 registered processor '0x2000'
450 *** runcommand config extensions --cwd ../server
444 *** runcommand config extensions --cwd ../server
451 extensions.debugprocessors=$TESTTMP/debugprocessors.py
445 extensions.debugprocessors=$TESTTMP/debugprocessors.py
452 extensions.lfs=
446 extensions.lfs=
453
447
454 # LFS enabled without requirement- revlogs have 0x2000 flag
448 # LFS enabled without requirement- revlogs have 0x2000 flag
455 *** runcommand debugprocessors nonlfs3.txt
449 *** runcommand debugprocessors nonlfs3.txt
456 registered processor '0x8000'
450 registered processor '0x8000'
457 registered processor '0x2000'
451 registered processor '0x2000'
458 *** runcommand config extensions
452 *** runcommand config extensions
459 extensions.debugprocessors=$TESTTMP/debugprocessors.py
453 extensions.debugprocessors=$TESTTMP/debugprocessors.py
460 extensions.lfs=
454 extensions.lfs=
461
455
462 # LFS disabled locally- revlogs don't have 0x2000 flag
456 # LFS disabled locally- revlogs don't have 0x2000 flag
463 *** runcommand debugprocessors nonlfs.txt -R ../nonlfs
457 *** runcommand debugprocessors nonlfs.txt -R ../nonlfs
464 registered processor '0x8000'
458 registered processor '0x8000'
465 *** runcommand config extensions --cwd ../nonlfs
459 *** runcommand config extensions --cwd ../nonlfs
466 extensions.debugprocessors=$TESTTMP/debugprocessors.py
460 extensions.debugprocessors=$TESTTMP/debugprocessors.py
467 extensions.lfs=!
461 extensions.lfs=!
468
462
469 --------------------------------------------------------------------------------
463 --------------------------------------------------------------------------------
470 Case #6: client with lfs content and the extension enabled; server with
464 Case #6: client with lfs content and the extension enabled; server with
471 lfs content, and the extension enabled.
465 lfs content, and the extension enabled.
472
466
473 $ echo 'this is another lfs file' > lfs2.txt
467 $ echo 'this is another lfs file' > lfs2.txt
474 $ hg ci -Aqm 'lfs file with lfs client'
468 $ hg ci -Aqm 'lfs file with lfs client'
475
469
476 $ hg --config paths.default= push -v http://localhost:$HGPORT
470 $ hg --config paths.default= push -v http://localhost:$HGPORT
477 pushing to http://localhost:$HGPORT/
471 pushing to http://localhost:$HGPORT/
478 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
472 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
479 searching for changes
473 searching for changes
480 remote has heads on branch 'default' that are not known locally: 8374dc4052cb
474 remote has heads on branch 'default' that are not known locally: 8374dc4052cb
481 lfs: uploading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
475 lfs: uploading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
482 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
476 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
483 lfs: uploaded 1 files (25 bytes)
477 lfs: uploaded 1 files (25 bytes)
484 1 changesets found
478 1 changesets found
485 uncompressed size of bundle content:
479 uncompressed size of bundle content:
486 206 (changelog)
480 206 (changelog)
487 172 (manifests)
481 172 (manifests)
488 275 lfs2.txt
482 275 lfs2.txt
489 remote: adding changesets
483 remote: adding changesets
490 remote: adding manifests
484 remote: adding manifests
491 remote: adding file changes
485 remote: adding file changes
492 remote: added 1 changesets with 1 changes to 1 files
486 remote: added 1 changesets with 1 changes to 1 files
493 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
487 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
494 .hg/requires:lfs
488 .hg/requires:lfs
495 $TESTTMP/server/.hg/requires:lfs
489 $TESTTMP/server/.hg/requires:lfs
496
490
497 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client6_clone
491 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client6_clone
498 $ grep 'lfs' $TESTTMP/client6_clone/.hg/requires $SERVER_REQUIRES
492 $ grep 'lfs' $TESTTMP/client6_clone/.hg/requires $SERVER_REQUIRES
499 $TESTTMP/client6_clone/.hg/requires:lfs
493 $TESTTMP/client6_clone/.hg/requires:lfs
500 $TESTTMP/server/.hg/requires:lfs
494 $TESTTMP/server/.hg/requires:lfs
501
495
502 $ hg init $TESTTMP/client6_pull
496 $ hg init $TESTTMP/client6_pull
503 $ hg -R $TESTTMP/client6_pull pull -u -v http://localhost:$HGPORT
497 $ hg -R $TESTTMP/client6_pull pull -u -v http://localhost:$HGPORT
504 pulling from http://localhost:$HGPORT/
498 pulling from http://localhost:$HGPORT/
505 requesting all changes
499 requesting all changes
506 adding changesets
500 adding changesets
507 adding manifests
501 adding manifests
508 adding file changes
502 adding file changes
509 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
503 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
510 added 6 changesets with 5 changes to 5 files (+1 heads)
504 added 6 changesets with 5 changes to 5 files (+1 heads)
511 new changesets d437e1d24fbd:d3b84d50eacb
505 new changesets d437e1d24fbd:d3b84d50eacb
512 resolving manifests
506 resolving manifests
513 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
507 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
514 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
508 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
515 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
509 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
516 lfs: downloaded 1 files (25 bytes)
510 lfs: downloaded 1 files (25 bytes)
517 getting lfs2.txt
511 getting lfs2.txt
518 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
512 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
519 getting nonlfs2.txt
513 getting nonlfs2.txt
520 getting nonlfs3.txt
514 getting nonlfs3.txt
521 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
515 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
522 updated to "d3b84d50eacb: lfs file with lfs client"
516 updated to "d3b84d50eacb: lfs file with lfs client"
523 1 other heads for branch "default"
517 1 other heads for branch "default"
524 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
518 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
525 $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
519 $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
526 $TESTTMP/client6_pull/.hg/requires:lfs
520 $TESTTMP/client6_pull/.hg/requires:lfs
527 $TESTTMP/server/.hg/requires:lfs
521 $TESTTMP/server/.hg/requires:lfs
528
522
529 $ hg identify http://localhost:$HGPORT
523 $ hg identify http://localhost:$HGPORT
530 d3b84d50eacb
524 d3b84d50eacb
531
525
532 --------------------------------------------------------------------------------
526 --------------------------------------------------------------------------------
533 Misc: process dies early if a requirement exists and the extension is disabled
527 Misc: process dies early if a requirement exists and the extension is disabled
534
528
535 $ hg --config extensions.lfs=! summary
529 $ hg --config extensions.lfs=! summary
536 abort: repository requires features unknown to this Mercurial: lfs!
530 abort: repository requires features unknown to this Mercurial: lfs!
537 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
531 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
538 [255]
532 [255]
539
533
540 $ echo 'this is an lfs file' > $TESTTMP/client6_clone/lfspair1.bin
534 $ echo 'this is an lfs file' > $TESTTMP/client6_clone/lfspair1.bin
541 $ echo 'this is an lfs file too' > $TESTTMP/client6_clone/lfspair2.bin
535 $ echo 'this is an lfs file too' > $TESTTMP/client6_clone/lfspair2.bin
542 $ hg -R $TESTTMP/client6_clone ci -Aqm 'add lfs pair'
536 $ hg -R $TESTTMP/client6_clone ci -Aqm 'add lfs pair'
543 $ hg -R $TESTTMP/client6_clone push -q
537 $ hg -R $TESTTMP/client6_clone push -q
544
538
545 $ hg clone -qU http://localhost:$HGPORT $TESTTMP/bulkfetch
539 $ hg clone -qU http://localhost:$HGPORT $TESTTMP/bulkfetch
546
540
547 Cat doesn't prefetch unless data is needed (e.g. '-T {rawdata}' doesn't need it)
541 Cat doesn't prefetch unless data is needed (e.g. '-T {rawdata}' doesn't need it)
548
542
549 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T '{rawdata}\n{path}\n'
543 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T '{rawdata}\n{path}\n'
550 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
544 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
551 version https://git-lfs.github.com/spec/v1
545 version https://git-lfs.github.com/spec/v1
552 oid sha256:cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
546 oid sha256:cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
553 size 20
547 size 20
554 x-is-binary 0
548 x-is-binary 0
555
549
556 lfspair1.bin
550 lfspair1.bin
557
551
558 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T json
552 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T json
559 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
553 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
560 [lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
554 [lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
561 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
555 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
562 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
556 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
563 lfs: downloaded 1 files (20 bytes)
557 lfs: downloaded 1 files (20 bytes)
564 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
558 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
565
559
566 {
560 {
567 "data": "this is an lfs file\n",
561 "data": "this is an lfs file\n",
568 "path": "lfspair1.bin",
562 "path": "lfspair1.bin",
569 "rawdata": "version https://git-lfs.github.com/spec/v1\noid sha256:cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782\nsize 20\nx-is-binary 0\n"
563 "rawdata": "version https://git-lfs.github.com/spec/v1\noid sha256:cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782\nsize 20\nx-is-binary 0\n"
570 }
564 }
571 ]
565 ]
572
566
573 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
567 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
574
568
575 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T '{data}\n'
569 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T '{data}\n'
576 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
570 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
577 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
571 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
578 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
572 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
579 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
573 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
580 lfs: downloaded 1 files (20 bytes)
574 lfs: downloaded 1 files (20 bytes)
581 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
575 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
582 this is an lfs file
576 this is an lfs file
583
577
584 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair2.bin
578 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair2.bin
585 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
579 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
586 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
580 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
587 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
581 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
588 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
582 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
589 lfs: downloaded 1 files (24 bytes)
583 lfs: downloaded 1 files (24 bytes)
590 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
584 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
591 this is an lfs file too
585 this is an lfs file too
592
586
593 Export will prefetch all needed files across all needed revisions
587 Export will prefetch all needed files across all needed revisions
594
588
595 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
589 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
596 $ hg -R $TESTTMP/bulkfetch -v export -r 0:tip -o all.export
590 $ hg -R $TESTTMP/bulkfetch -v export -r 0:tip -o all.export
597 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
591 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
598 exporting patches:
592 exporting patches:
599 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
593 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
600 lfs: need to transfer 4 objects (92 bytes)
594 lfs: need to transfer 4 objects (92 bytes)
601 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
595 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
602 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
596 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
603 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
597 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
604 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
598 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
605 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
599 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
606 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
600 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
607 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
601 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
608 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
602 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
609 lfs: downloaded 4 files (92 bytes)
603 lfs: downloaded 4 files (92 bytes)
610 all.export
604 all.export
611 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
605 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
612 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
606 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
613 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
607 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
614 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
608 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
615
609
616 Export with selected files is used with `extdiff --patch`
610 Export with selected files is used with `extdiff --patch`
617
611
618 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
612 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
619 $ hg --config extensions.extdiff= \
613 $ hg --config extensions.extdiff= \
620 > -R $TESTTMP/bulkfetch -v extdiff -r 2:tip --patch $TESTTMP/bulkfetch/lfs.bin
614 > -R $TESTTMP/bulkfetch -v extdiff -r 2:tip --patch $TESTTMP/bulkfetch/lfs.bin
621 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
615 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
622 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
616 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
623 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
617 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
624 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
618 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
625 lfs: downloaded 1 files (23 bytes)
619 lfs: downloaded 1 files (23 bytes)
626 */hg-8374dc4052cb.patch (glob)
620 */hg-8374dc4052cb.patch (glob)
627 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
621 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
628 */hg-9640b57e77b1.patch (glob)
622 */hg-9640b57e77b1.patch (glob)
629 --- */hg-8374dc4052cb.patch * (glob)
623 --- */hg-8374dc4052cb.patch * (glob)
630 +++ */hg-9640b57e77b1.patch * (glob)
624 +++ */hg-9640b57e77b1.patch * (glob)
631 @@ -2,12 +2,7 @@
625 @@ -2,12 +2,7 @@
632 # User test
626 # User test
633 # Date 0 0
627 # Date 0 0
634 # Thu Jan 01 00:00:00 1970 +0000
628 # Thu Jan 01 00:00:00 1970 +0000
635 -# Node ID 8374dc4052cbd388e79d9dc4ddb29784097aa354
629 -# Node ID 8374dc4052cbd388e79d9dc4ddb29784097aa354
636 -# Parent 1477875038c60152e391238920a16381c627b487
630 -# Parent 1477875038c60152e391238920a16381c627b487
637 -lfs
631 -lfs
638 +# Node ID 9640b57e77b14c3a0144fb4478b6cc13e13ea0d1
632 +# Node ID 9640b57e77b14c3a0144fb4478b6cc13e13ea0d1
639 +# Parent d3b84d50eacbd56638e11abce6b8616aaba54420
633 +# Parent d3b84d50eacbd56638e11abce6b8616aaba54420
640 +add lfs pair
634 +add lfs pair
641
635
642 -diff -r 1477875038c6 -r 8374dc4052cb lfs.bin
636 -diff -r 1477875038c6 -r 8374dc4052cb lfs.bin
643 ---- /dev/null Thu Jan 01 00:00:00 1970 +0000
637 ---- /dev/null Thu Jan 01 00:00:00 1970 +0000
644 -+++ b/lfs.bin Thu Jan 01 00:00:00 1970 +0000
638 -+++ b/lfs.bin Thu Jan 01 00:00:00 1970 +0000
645 -@@ -0,0 +1,1 @@
639 -@@ -0,0 +1,1 @@
646 -+this is a big lfs file
640 -+this is a big lfs file
647 cleaning up temp directory
641 cleaning up temp directory
648 [1]
642 [1]
649
643
650 Diff will prefetch files
644 Diff will prefetch files
651
645
652 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
646 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
653 $ hg -R $TESTTMP/bulkfetch -v diff -r 2:tip
647 $ hg -R $TESTTMP/bulkfetch -v diff -r 2:tip
654 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
648 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
655 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
649 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
656 lfs: need to transfer 4 objects (92 bytes)
650 lfs: need to transfer 4 objects (92 bytes)
657 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
651 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
658 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
652 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
659 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
653 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
660 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
654 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
661 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
655 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
662 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
656 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
663 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
657 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
664 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
658 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
665 lfs: downloaded 4 files (92 bytes)
659 lfs: downloaded 4 files (92 bytes)
666 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
660 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
667 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
661 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
668 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
662 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
669 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
663 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
670 diff -r 8374dc4052cb -r 9640b57e77b1 lfs.bin
664 diff -r 8374dc4052cb -r 9640b57e77b1 lfs.bin
671 --- a/lfs.bin Thu Jan 01 00:00:00 1970 +0000
665 --- a/lfs.bin Thu Jan 01 00:00:00 1970 +0000
672 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
666 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
673 @@ -1,1 +0,0 @@
667 @@ -1,1 +0,0 @@
674 -this is a big lfs file
668 -this is a big lfs file
675 diff -r 8374dc4052cb -r 9640b57e77b1 lfs2.txt
669 diff -r 8374dc4052cb -r 9640b57e77b1 lfs2.txt
676 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
670 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
677 +++ b/lfs2.txt Thu Jan 01 00:00:00 1970 +0000
671 +++ b/lfs2.txt Thu Jan 01 00:00:00 1970 +0000
678 @@ -0,0 +1,1 @@
672 @@ -0,0 +1,1 @@
679 +this is another lfs file
673 +this is another lfs file
680 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair1.bin
674 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair1.bin
681 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
675 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
682 +++ b/lfspair1.bin Thu Jan 01 00:00:00 1970 +0000
676 +++ b/lfspair1.bin Thu Jan 01 00:00:00 1970 +0000
683 @@ -0,0 +1,1 @@
677 @@ -0,0 +1,1 @@
684 +this is an lfs file
678 +this is an lfs file
685 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair2.bin
679 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair2.bin
686 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
680 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
687 +++ b/lfspair2.bin Thu Jan 01 00:00:00 1970 +0000
681 +++ b/lfspair2.bin Thu Jan 01 00:00:00 1970 +0000
688 @@ -0,0 +1,1 @@
682 @@ -0,0 +1,1 @@
689 +this is an lfs file too
683 +this is an lfs file too
690 diff -r 8374dc4052cb -r 9640b57e77b1 nonlfs.txt
684 diff -r 8374dc4052cb -r 9640b57e77b1 nonlfs.txt
691 --- a/nonlfs.txt Thu Jan 01 00:00:00 1970 +0000
685 --- a/nonlfs.txt Thu Jan 01 00:00:00 1970 +0000
692 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
686 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
693 @@ -1,1 +0,0 @@
687 @@ -1,1 +0,0 @@
694 -non-lfs
688 -non-lfs
695 diff -r 8374dc4052cb -r 9640b57e77b1 nonlfs3.txt
689 diff -r 8374dc4052cb -r 9640b57e77b1 nonlfs3.txt
696 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
690 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
697 +++ b/nonlfs3.txt Thu Jan 01 00:00:00 1970 +0000
691 +++ b/nonlfs3.txt Thu Jan 01 00:00:00 1970 +0000
698 @@ -0,0 +1,1 @@
692 @@ -0,0 +1,1 @@
699 +non-lfs
693 +non-lfs
700
694
701 Only the files required by diff are prefetched
695 Only the files required by diff are prefetched
702
696
703 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
697 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
704 $ hg -R $TESTTMP/bulkfetch -v diff -r 2:tip $TESTTMP/bulkfetch/lfspair2.bin
698 $ hg -R $TESTTMP/bulkfetch -v diff -r 2:tip $TESTTMP/bulkfetch/lfspair2.bin
705 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
699 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
706 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
700 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
707 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
701 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
708 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
702 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
709 lfs: downloaded 1 files (24 bytes)
703 lfs: downloaded 1 files (24 bytes)
710 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
704 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
711 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair2.bin
705 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair2.bin
712 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
706 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
713 +++ b/lfspair2.bin Thu Jan 01 00:00:00 1970 +0000
707 +++ b/lfspair2.bin Thu Jan 01 00:00:00 1970 +0000
714 @@ -0,0 +1,1 @@
708 @@ -0,0 +1,1 @@
715 +this is an lfs file too
709 +this is an lfs file too
716
710
717 #endif
711 #endif
718
712
719 $ "$PYTHON" $TESTDIR/killdaemons.py $DAEMON_PIDS
713 $ "$PYTHON" $TESTDIR/killdaemons.py $DAEMON_PIDS
720
714
721 $ cat $TESTTMP/errors.log
715 $ cat $TESTTMP/errors.log
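
The remainder of the changeset updates the repository-upgrade test: in the expected output of `hg debugupgraderepo` below, actions such as sidedata and copies-sdc handling are no longer advertised when they are not part of the work planned for the requested upgrade. As a rough illustration only, the following minimal Python sketch shows that kind of filtering; the names (Action, filter_actions, planned_names) are hypothetical and are not Mercurial's actual internals.

    # Hypothetical sketch: report only the upgrade actions that are actually
    # planned, instead of describing every known action.  Illustrative names,
    # not Mercurial's real data structures.
    class Action(object):
        def __init__(self, name, description):
            self.name = name
            self.description = description

    def filter_actions(all_actions, planned_names):
        """Keep only the actions whose name is in the planned set."""
        return [a for a in all_actions if a.name in planned_names]

    actions = [
        Action('re-delta-parent', 'recompute deltas against a better parent'),
        Action('sidedata', 'allows storage of extra data alongside a revision'),
        Action('copies-sdc', 'use a more efficient copy-tracing algorithm'),
    ]
    for action in filter_actions(actions, {'re-delta-parent'}):
        # sidedata and copies-sdc are skipped: they are not planned work.
        print('%s\n  %s\n' % (action.name, action.description))
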
@@ -1,1493 +1,1379
1 #require no-reposimplestore
1 #require no-reposimplestore
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > share =
5 > share =
6 > EOF
6 > EOF
7
7
8 store and revlogv1 are required in source
8 store and revlogv1 are required in source
9
9
10 $ hg --config format.usestore=false init no-store
10 $ hg --config format.usestore=false init no-store
11 $ hg -R no-store debugupgraderepo
11 $ hg -R no-store debugupgraderepo
12 abort: cannot upgrade repository; requirement missing: store
12 abort: cannot upgrade repository; requirement missing: store
13 [255]
13 [255]
14
14
15 $ hg init no-revlogv1
15 $ hg init no-revlogv1
16 $ cat > no-revlogv1/.hg/requires << EOF
16 $ cat > no-revlogv1/.hg/requires << EOF
17 > dotencode
17 > dotencode
18 > fncache
18 > fncache
19 > generaldelta
19 > generaldelta
20 > store
20 > store
21 > EOF
21 > EOF
22
22
23 $ hg -R no-revlogv1 debugupgraderepo
23 $ hg -R no-revlogv1 debugupgraderepo
24 abort: cannot upgrade repository; requirement missing: revlogv1
24 abort: cannot upgrade repository; requirement missing: revlogv1
25 [255]
25 [255]
26
26
27 Cannot upgrade shared repositories
27 Cannot upgrade shared repositories
28
28
29 $ hg init share-parent
29 $ hg init share-parent
30 $ hg -q share share-parent share-child
30 $ hg -q share share-parent share-child
31
31
32 $ hg -R share-child debugupgraderepo
32 $ hg -R share-child debugupgraderepo
33 abort: cannot upgrade repository; unsupported source requirement: shared
33 abort: cannot upgrade repository; unsupported source requirement: shared
34 [255]
34 [255]
35
35
36 Do not yet support upgrading treemanifest repos
36 Do not yet support upgrading treemanifest repos
37
37
38 $ hg --config experimental.treemanifest=true init treemanifest
38 $ hg --config experimental.treemanifest=true init treemanifest
39 $ hg -R treemanifest debugupgraderepo
39 $ hg -R treemanifest debugupgraderepo
40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
41 [255]
41 [255]
42
42
43 Cannot add treemanifest requirement during upgrade
43 Cannot add treemanifest requirement during upgrade
44
44
45 $ hg init disallowaddedreq
45 $ hg init disallowaddedreq
46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
48 [255]
48 [255]
49
49
50 An upgrade of a repository created with recommended settings only suggests optimizations
50 An upgrade of a repository created with recommended settings only suggests optimizations
51
51
52 $ hg init empty
52 $ hg init empty
53 $ cd empty
53 $ cd empty
54 $ hg debugformat
54 $ hg debugformat
55 format-variant repo
55 format-variant repo
56 fncache: yes
56 fncache: yes
57 dotencode: yes
57 dotencode: yes
58 generaldelta: yes
58 generaldelta: yes
59 sparserevlog: yes
59 sparserevlog: yes
60 sidedata: no
60 sidedata: no
61 copies-sdc: no
61 copies-sdc: no
62 plain-cl-delta: yes
62 plain-cl-delta: yes
63 compression: zlib
63 compression: zlib
64 compression-level: default
64 compression-level: default
65 $ hg debugformat --verbose
65 $ hg debugformat --verbose
66 format-variant repo config default
66 format-variant repo config default
67 fncache: yes yes yes
67 fncache: yes yes yes
68 dotencode: yes yes yes
68 dotencode: yes yes yes
69 generaldelta: yes yes yes
69 generaldelta: yes yes yes
70 sparserevlog: yes yes yes
70 sparserevlog: yes yes yes
71 sidedata: no no no
71 sidedata: no no no
72 copies-sdc: no no no
72 copies-sdc: no no no
73 plain-cl-delta: yes yes yes
73 plain-cl-delta: yes yes yes
74 compression: zlib zlib zlib
74 compression: zlib zlib zlib
75 compression-level: default default default
75 compression-level: default default default
76 $ hg debugformat --verbose --config format.usefncache=no
76 $ hg debugformat --verbose --config format.usefncache=no
77 format-variant repo config default
77 format-variant repo config default
78 fncache: yes no yes
78 fncache: yes no yes
79 dotencode: yes no yes
79 dotencode: yes no yes
80 generaldelta: yes yes yes
80 generaldelta: yes yes yes
81 sparserevlog: yes yes yes
81 sparserevlog: yes yes yes
82 sidedata: no no no
82 sidedata: no no no
83 copies-sdc: no no no
83 copies-sdc: no no no
84 plain-cl-delta: yes yes yes
84 plain-cl-delta: yes yes yes
85 compression: zlib zlib zlib
85 compression: zlib zlib zlib
86 compression-level: default default default
86 compression-level: default default default
87 $ hg debugformat --verbose --config format.usefncache=no --color=debug
87 $ hg debugformat --verbose --config format.usefncache=no --color=debug
88 format-variant repo config default
88 format-variant repo config default
89 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
89 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
90 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
90 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
91 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
91 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
92 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
92 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
93 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
93 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
94 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
94 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
95 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
95 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
96 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
96 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
97 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
97 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
98 $ hg debugformat -Tjson
98 $ hg debugformat -Tjson
99 [
99 [
100 {
100 {
101 "config": true,
101 "config": true,
102 "default": true,
102 "default": true,
103 "name": "fncache",
103 "name": "fncache",
104 "repo": true
104 "repo": true
105 },
105 },
106 {
106 {
107 "config": true,
107 "config": true,
108 "default": true,
108 "default": true,
109 "name": "dotencode",
109 "name": "dotencode",
110 "repo": true
110 "repo": true
111 },
111 },
112 {
112 {
113 "config": true,
113 "config": true,
114 "default": true,
114 "default": true,
115 "name": "generaldelta",
115 "name": "generaldelta",
116 "repo": true
116 "repo": true
117 },
117 },
118 {
118 {
119 "config": true,
119 "config": true,
120 "default": true,
120 "default": true,
121 "name": "sparserevlog",
121 "name": "sparserevlog",
122 "repo": true
122 "repo": true
123 },
123 },
124 {
124 {
125 "config": false,
125 "config": false,
126 "default": false,
126 "default": false,
127 "name": "sidedata",
127 "name": "sidedata",
128 "repo": false
128 "repo": false
129 },
129 },
130 {
130 {
131 "config": false,
131 "config": false,
132 "default": false,
132 "default": false,
133 "name": "copies-sdc",
133 "name": "copies-sdc",
134 "repo": false
134 "repo": false
135 },
135 },
136 {
136 {
137 "config": true,
137 "config": true,
138 "default": true,
138 "default": true,
139 "name": "plain-cl-delta",
139 "name": "plain-cl-delta",
140 "repo": true
140 "repo": true
141 },
141 },
142 {
142 {
143 "config": "zlib",
143 "config": "zlib",
144 "default": "zlib",
144 "default": "zlib",
145 "name": "compression",
145 "name": "compression",
146 "repo": "zlib"
146 "repo": "zlib"
147 },
147 },
148 {
148 {
149 "config": "default",
149 "config": "default",
150 "default": "default",
150 "default": "default",
151 "name": "compression-level",
151 "name": "compression-level",
152 "repo": "default"
152 "repo": "default"
153 }
153 }
154 ]
154 ]
155 $ hg debugupgraderepo
155 $ hg debugupgraderepo
156 (no feature deficiencies found in existing repository)
156 (no feature deficiencies found in existing repository)
157 performing an upgrade with "--run" will make the following changes:
157 performing an upgrade with "--run" will make the following changes:
158
158
159 requirements
159 requirements
160 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
160 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
161
161
162 sidedata
163 Allows storage of extra data alongside a revision.
164
165 copies-sdc
166 Allows to use more efficient algorithm to deal with copy tracing.
167
168 additional optimizations are available by specifying "--optimize <name>":
162 additional optimizations are available by specifying "--optimize <name>":
169
163
170 re-delta-parent
164 re-delta-parent
171 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
165 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
172
166
173 re-delta-multibase
167 re-delta-multibase
174 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
168 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
175
169
176 re-delta-all
170 re-delta-all
177 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
171 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
178
172
179 re-delta-fulladd
173 re-delta-fulladd
180 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
174 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
181
175
182
176
183 --optimize can be used to add optimizations
177 --optimize can be used to add optimizations
184
178
185 $ hg debugupgrade --optimize redeltaparent
179 $ hg debugupgrade --optimize redeltaparent
186 (no feature deficiencies found in existing repository)
180 (no feature deficiencies found in existing repository)
187 performing an upgrade with "--run" will make the following changes:
181 performing an upgrade with "--run" will make the following changes:
188
182
189 requirements
183 requirements
190 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
184 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
191
185
192 sidedata
193 Allows storage of extra data alongside a revision.
194
195 copies-sdc
196 Allows to use more efficient algorithm to deal with copy tracing.
197
198 re-delta-parent
186 re-delta-parent
199 deltas within internal storage will choose a new base revision if needed
187 deltas within internal storage will choose a new base revision if needed
200
188
201 additional optimizations are available by specifying "--optimize <name>":
189 additional optimizations are available by specifying "--optimize <name>":
202
190
203 re-delta-multibase
191 re-delta-multibase
204 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
192 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
205
193
206 re-delta-all
194 re-delta-all
207 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
195 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
208
196
209 re-delta-fulladd
197 re-delta-fulladd
210 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
198 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
211
199
212
200
213 modern form of the option
201 modern form of the option
214
202
215 $ hg debugupgrade --optimize re-delta-parent
203 $ hg debugupgrade --optimize re-delta-parent
216 (no feature deficiencies found in existing repository)
204 (no feature deficiencies found in existing repository)
217 performing an upgrade with "--run" will make the following changes:
205 performing an upgrade with "--run" will make the following changes:
218
206
219 requirements
207 requirements
220 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
208 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
221
209
222 sidedata
223 Allows storage of extra data alongside a revision.
224
225 copies-sdc
226 Allows to use more efficient algorithm to deal with copy tracing.
227
228 re-delta-parent
210 re-delta-parent
229 deltas within internal storage will choose a new base revision if needed
211 deltas within internal storage will choose a new base revision if needed
230
212
231 additional optimizations are available by specifying "--optimize <name>":
213 additional optimizations are available by specifying "--optimize <name>":
232
214
233 re-delta-multibase
215 re-delta-multibase
234 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
216 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
235
217
236 re-delta-all
218 re-delta-all
237 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
219 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
238
220
239 re-delta-fulladd
221 re-delta-fulladd
240 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
222 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
241
223
242
224
243 unknown optimization:
225 unknown optimization:
244
226
245 $ hg debugupgrade --optimize foobar
227 $ hg debugupgrade --optimize foobar
246 abort: unknown optimization action requested: foobar
228 abort: unknown optimization action requested: foobar
247 (run without arguments to see valid optimizations)
229 (run without arguments to see valid optimizations)
248 [255]
230 [255]
249
231
250 Various sub-optimal detections work
232 Various sub-optimal detections work
251
233
252 $ cat > .hg/requires << EOF
234 $ cat > .hg/requires << EOF
253 > revlogv1
235 > revlogv1
254 > store
236 > store
255 > EOF
237 > EOF
256
238
257 $ hg debugformat
239 $ hg debugformat
258 format-variant repo
240 format-variant repo
259 fncache: no
241 fncache: no
260 dotencode: no
242 dotencode: no
261 generaldelta: no
243 generaldelta: no
262 sparserevlog: no
244 sparserevlog: no
263 sidedata: no
245 sidedata: no
264 copies-sdc: no
246 copies-sdc: no
265 plain-cl-delta: yes
247 plain-cl-delta: yes
266 compression: zlib
248 compression: zlib
267 compression-level: default
249 compression-level: default
268 $ hg debugformat --verbose
250 $ hg debugformat --verbose
269 format-variant repo config default
251 format-variant repo config default
270 fncache: no yes yes
252 fncache: no yes yes
271 dotencode: no yes yes
253 dotencode: no yes yes
272 generaldelta: no yes yes
254 generaldelta: no yes yes
273 sparserevlog: no yes yes
255 sparserevlog: no yes yes
274 sidedata: no no no
256 sidedata: no no no
275 copies-sdc: no no no
257 copies-sdc: no no no
276 plain-cl-delta: yes yes yes
258 plain-cl-delta: yes yes yes
277 compression: zlib zlib zlib
259 compression: zlib zlib zlib
278 compression-level: default default default
260 compression-level: default default default
279 $ hg debugformat --verbose --config format.usegeneraldelta=no
261 $ hg debugformat --verbose --config format.usegeneraldelta=no
280 format-variant repo config default
262 format-variant repo config default
281 fncache: no yes yes
263 fncache: no yes yes
282 dotencode: no yes yes
264 dotencode: no yes yes
283 generaldelta: no no yes
265 generaldelta: no no yes
284 sparserevlog: no no yes
266 sparserevlog: no no yes
285 sidedata: no no no
267 sidedata: no no no
286 copies-sdc: no no no
268 copies-sdc: no no no
287 plain-cl-delta: yes yes yes
269 plain-cl-delta: yes yes yes
288 compression: zlib zlib zlib
270 compression: zlib zlib zlib
289 compression-level: default default default
271 compression-level: default default default
290 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
272 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
291 format-variant repo config default
273 format-variant repo config default
292 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
274 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
293 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
275 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
294 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
276 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
295 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
277 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
296 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
278 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
297 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
279 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
298 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
280 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
299 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
281 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
300 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
282 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
301 $ hg debugupgraderepo
283 $ hg debugupgraderepo
302 repository lacks features recommended by current config options:
284 repository lacks features recommended by current config options:
303
285
304 fncache
286 fncache
305 long and reserved filenames may not work correctly; repository performance is sub-optimal
287 long and reserved filenames may not work correctly; repository performance is sub-optimal
306
288
307 dotencode
289 dotencode
308 storage of filenames beginning with a period or space may not work correctly
290 storage of filenames beginning with a period or space may not work correctly
309
291
310 generaldelta
292 generaldelta
311 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
293 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
312
294
313 sparserevlog
295 sparserevlog
314 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
296 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
315
297
316
298
317 performing an upgrade with "--run" will make the following changes:
299 performing an upgrade with "--run" will make the following changes:
318
300
319 requirements
301 requirements
320 preserved: revlogv1, store
302 preserved: revlogv1, store
321 added: dotencode, fncache, generaldelta, sparserevlog
303 added: dotencode, fncache, generaldelta, sparserevlog
322
304
323 fncache
305 fncache
324 repository will be more resilient to storing certain paths and performance of certain operations should be improved
306 repository will be more resilient to storing certain paths and performance of certain operations should be improved
325
307
326 dotencode
308 dotencode
327 repository will be better able to store files beginning with a space or period
309 repository will be better able to store files beginning with a space or period
328
310
329 generaldelta
311 generaldelta
330 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
312 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
331
313
332 sparserevlog
314 sparserevlog
333 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
315 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
334
316
335 sidedata
336 Allows storage of extra data alongside a revision.
337
338 copies-sdc
339 Allows to use more efficient algorithm to deal with copy tracing.
340
341 additional optimizations are available by specifying "--optimize <name>":
317 additional optimizations are available by specifying "--optimize <name>":
342
318
343 re-delta-parent
319 re-delta-parent
344 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
320 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
345
321
346 re-delta-multibase
322 re-delta-multibase
347 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
323 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
348
324
349 re-delta-all
325 re-delta-all
350 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
326 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
351
327
352 re-delta-fulladd
328 re-delta-fulladd
353 every revision will be re-added as if it were new content. It will go through the full storage mechanism, giving extensions a chance to process it (e.g. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
329 every revision will be re-added as if it were new content. It will go through the full storage mechanism, giving extensions a chance to process it (e.g. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
354
330
355
331
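The three "re-delta" optimizations above differ mainly in how many candidate bases they are willing to try. As a rough sketch of the idea behind "re-delta-multibase" (toy Python, using a unified diff as a stand-in for Mercurial's real delta format; none of these helpers exist in Mercurial), the storage layer computes a delta against several possible bases and keeps the cheapest one:

    import difflib

    def toy_delta_size(base, new):
        # size of a unified diff as a stand-in for the real delta encoding
        return len("".join(difflib.unified_diff(base.splitlines(True),
                                                new.splitlines(True))))

    def pick_smallest_base(candidates, new):
        # candidates: {name: text of a possible base revision}
        sizes = {name: toy_delta_size(text, new) for name, text in candidates.items()}
        best = min(sizes, key=sizes.get)
        return best, sizes[best]

    common = "".join("shared line %d\n" % i for i in range(100))
    p1 = common + "only in p1\n"
    p2 = common + "".join("only in p2, line %d\n" % i for i in range(50))
    merged = p1 + "".join("only in p2, line %d\n" % i for i in range(50))
    # the delta against p2 only has to add one line, so p2 wins
    print(pick_smallest_base({"p1": p1, "p2": p2}, merged))

For a merge revision the candidates would typically be its two parents, which is why this optimization mostly pays off in merge-heavy repositories.
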
356 $ hg --config format.dotencode=false debugupgraderepo
332 $ hg --config format.dotencode=false debugupgraderepo
357 repository lacks features recommended by current config options:
333 repository lacks features recommended by current config options:
358
334
359 fncache
335 fncache
360 long and reserved filenames may not work correctly; repository performance is sub-optimal
336 long and reserved filenames may not work correctly; repository performance is sub-optimal
361
337
362 generaldelta
338 generaldelta
363 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
339 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
364
340
365 sparserevlog
341 sparserevlog
366 in order to limit disk reading and memory usage on older versions, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severely limit Mercurial's ability to build good delta chains, resulting in much more storage space being taken and limiting the reusability of on-disk deltas during exchange.
342 in order to limit disk reading and memory usage on older versions, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severely limit Mercurial's ability to build good delta chains, resulting in much more storage space being taken and limiting the reusability of on-disk deltas during exchange.
367
343
368 repository lacks features used by the default config options:
344 repository lacks features used by the default config options:
369
345
370 dotencode
346 dotencode
371 storage of filenames beginning with a period or space may not work correctly
347 storage of filenames beginning with a period or space may not work correctly
372
348
373
349
374 performing an upgrade with "--run" will make the following changes:
350 performing an upgrade with "--run" will make the following changes:
375
351
376 requirements
352 requirements
377 preserved: revlogv1, store
353 preserved: revlogv1, store
378 added: fncache, generaldelta, sparserevlog
354 added: fncache, generaldelta, sparserevlog
379
355
380 fncache
356 fncache
381 repository will be more resilient to storing certain paths and performance of certain operations should be improved
357 repository will be more resilient to storing certain paths and performance of certain operations should be improved
382
358
383 generaldelta
359 generaldelta
384 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
360 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
385
361
386 sparserevlog
362 sparserevlog
387 Revlogs support delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
363 Revlogs support delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
388
364
389 sidedata
390 Allows storage of extra data alongside a revision.
391
392 copies-sdc
393 Allows using a more efficient algorithm to deal with copy tracing.
394
395 additional optimizations are available by specifying "--optimize <name>":
365 additional optimizations are available by specifying "--optimize <name>":
396
366
397 re-delta-parent
367 re-delta-parent
398 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
368 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
399
369
400 re-delta-multibase
370 re-delta-multibase
401 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
371 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
402
372
403 re-delta-all
373 re-delta-all
404 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
374 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
405
375
406 re-delta-fulladd
376 re-delta-fulladd
407 every revision will be re-added as if it were new content. It will go through the full storage mechanism, giving extensions a chance to process it (e.g. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
377 every revision will be re-added as if it were new content. It will go through the full storage mechanism, giving extensions a chance to process it (e.g. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
408
378
409
379
410 $ cd ..
380 $ cd ..
411
381
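Before moving on, the difference between the two delta-chain behaviours described above can be pictured with a small back-of-the-envelope sketch (plain Python, hypothetical numbers): a reader bounded by the total span of a chain must read every byte between its first and last chunk, while a sparse-revlog reader only reads the chunks themselves and skips the gaps.

    def span_read(chunks):
        # chunks: (offset, length) pairs for each delta in the chain
        start = min(offset for offset, _ in chunks)
        end = max(offset + length for offset, length in chunks)
        return end - start        # everything between first and last byte is read

    def sparse_read(chunks):
        return sum(length for _, length in chunks)    # gaps are skipped

    # three small deltas spread far apart in the data file
    chain = [(0, 1000), (500000, 800), (1200000, 600)]
    print("full-span read:", span_read(chain), "bytes")
    print("sparse read:   ", sparse_read(chain), "bytes")
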
412 Upgrading a repository that is already modern essentially no-ops
382 Upgrading a repository that is already modern essentially no-ops
413
383
414 $ hg init modern
384 $ hg init modern
415 $ hg -R modern debugupgraderepo --run
385 $ hg -R modern debugupgraderepo --run
416 upgrade will perform the following actions:
386 upgrade will perform the following actions:
417
387
418 requirements
388 requirements
419 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
389 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
420
390
421 sidedata
422 Allows storage of extra data alongside a revision.
423
424 copies-sdc
425 Allows using a more efficient algorithm to deal with copy tracing.
426
427 beginning upgrade...
391 beginning upgrade...
428 repository locked and read-only
392 repository locked and read-only
429 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
393 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
430 (it is safe to interrupt this process any time before data migration completes)
394 (it is safe to interrupt this process any time before data migration completes)
431 data fully migrated to temporary repository
395 data fully migrated to temporary repository
432 marking source repository as being upgraded; clients will be unable to read from repository
396 marking source repository as being upgraded; clients will be unable to read from repository
433 starting in-place swap of repository data
397 starting in-place swap of repository data
434 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
398 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
435 replacing store...
399 replacing store...
436 store replacement complete; repository was inconsistent for *s (glob)
400 store replacement complete; repository was inconsistent for *s (glob)
437 finalizing requirements file and making repository readable again
401 finalizing requirements file and making repository readable again
438 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
402 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
439 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
403 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
440 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
404 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
441
405
442 Upgrading a repository to generaldelta works
406 Upgrading a repository to generaldelta works
443
407
444 $ hg --config format.usegeneraldelta=false init upgradegd
408 $ hg --config format.usegeneraldelta=false init upgradegd
445 $ cd upgradegd
409 $ cd upgradegd
446 $ touch f0
410 $ touch f0
447 $ hg -q commit -A -m initial
411 $ hg -q commit -A -m initial
448 $ mkdir FooBarDirectory.d
412 $ mkdir FooBarDirectory.d
449 $ touch FooBarDirectory.d/f1
413 $ touch FooBarDirectory.d/f1
450 $ hg -q commit -A -m 'add f1'
414 $ hg -q commit -A -m 'add f1'
451 $ hg -q up -r 0
415 $ hg -q up -r 0
452 >>> from __future__ import absolute_import, print_function
416 >>> from __future__ import absolute_import, print_function
453 >>> import random
417 >>> import random
454 >>> random.seed(0) # have reproducible content
418 >>> random.seed(0) # have reproducible content
455 >>> with open("f2", "wb") as f:
419 >>> with open("f2", "wb") as f:
456 ... for i in range(100000):
420 ... for i in range(100000):
457 ... f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
421 ... f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
458 $ hg -q commit -A -m 'add f2'
422 $ hg -q commit -A -m 'add f2'
459
423
460 make sure we have a .d file
424 make sure we have a .d file
461
425
462 $ ls -d .hg/store/data/*
426 $ ls -d .hg/store/data/*
463 .hg/store/data/_foo_bar_directory.d.hg
427 .hg/store/data/_foo_bar_directory.d.hg
464 .hg/store/data/f0.i
428 .hg/store/data/f0.i
465 .hg/store/data/f2.d
429 .hg/store/data/f2.d
466 .hg/store/data/f2.i
430 .hg/store/data/f2.i
467
431
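The .d file matters because small revlogs keep their data inline in the .i index file and are only split into a separate .d data file once they grow past an inline-size threshold (about 128 KiB); f2, with roughly 1 MB of tracked data, is therefore split. A quick, illustrative way to group the store's top-level filelogs (not an hg command, just a sketch run from the repository root):

    import os

    def classify_filelogs(datadir=".hg/store/data"):
        names = os.listdir(datadir)
        indexes = {n[:-2] for n in names if n.endswith(".i")}
        split = {n[:-2] for n in names if n.endswith(".d")}
        return {
            "inline (.i only)": sorted(indexes - split),
            "split (.i + .d)": sorted(indexes & split),
        }

    # here: f0 stays inline, f2 is split into f2.i and f2.d
    print(classify_filelogs())
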
468 $ hg debugupgraderepo --run --config format.sparse-revlog=false
432 $ hg debugupgraderepo --run --config format.sparse-revlog=false
469 upgrade will perform the following actions:
433 upgrade will perform the following actions:
470
434
471 requirements
435 requirements
472 preserved: dotencode, fncache, revlogv1, store
436 preserved: dotencode, fncache, revlogv1, store
473 added: generaldelta
437 added: generaldelta
474
438
475 generaldelta
439 generaldelta
476 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
440 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
477
441
478 sidedata
479 Allows storage of extra data alongside a revision.
480
481 copies-sdc
482 Allows using a more efficient algorithm to deal with copy tracing.
483
484 beginning upgrade...
442 beginning upgrade...
485 repository locked and read-only
443 repository locked and read-only
486 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
444 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
487 (it is safe to interrupt this process any time before data migration completes)
445 (it is safe to interrupt this process any time before data migration completes)
488 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
446 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
489 migrating 519 KB in store; 1.05 MB tracked data
447 migrating 519 KB in store; 1.05 MB tracked data
490 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
448 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
491 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
449 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
492 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
450 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
493 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
451 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
494 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
452 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
495 finished migrating 3 changelog revisions; change in size: 0 bytes
453 finished migrating 3 changelog revisions; change in size: 0 bytes
496 finished migrating 9 total revisions; total change in store size: -17 bytes
454 finished migrating 9 total revisions; total change in store size: -17 bytes
497 copying phaseroots
455 copying phaseroots
498 data fully migrated to temporary repository
456 data fully migrated to temporary repository
499 marking source repository as being upgraded; clients will be unable to read from repository
457 marking source repository as being upgraded; clients will be unable to read from repository
500 starting in-place swap of repository data
458 starting in-place swap of repository data
501 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
459 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
502 replacing store...
460 replacing store...
503 store replacement complete; repository was inconsistent for *s (glob)
461 store replacement complete; repository was inconsistent for *s (glob)
504 finalizing requirements file and making repository readable again
462 finalizing requirements file and making repository readable again
505 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
463 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
506 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
464 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
507 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
465 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
508
466
509 Original requirements backed up
467 Original requirements backed up
510
468
511 $ cat .hg/upgradebackup.*/requires
469 $ cat .hg/upgradebackup.*/requires
512 dotencode
470 dotencode
513 fncache
471 fncache
514 revlogv1
472 revlogv1
515 store
473 store
516
474
517 generaldelta added to original requirements file
475 generaldelta added to original requirements file
518
476
519 $ cat .hg/requires
477 $ cat .hg/requires
520 dotencode
478 dotencode
521 fncache
479 fncache
522 generaldelta
480 generaldelta
523 revlogv1
481 revlogv1
524 store
482 store
525
483
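The same check can of course be scripted; a minimal sketch (a hypothetical helper, not part of Mercurial's API) that reads the requires file shown above:

    import os

    def read_requirements(repo_root="."):
        path = os.path.join(repo_root, ".hg", "requires")
        with open(path, "r", encoding="ascii") as fh:
            return {line.strip() for line in fh if line.strip()}

    # after the upgrade above this returns True; before it, False
    def uses_generaldelta(repo_root="."):
        return "generaldelta" in read_requirements(repo_root)
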
526 store directory has files we expect
484 store directory has files we expect
527
485
528 $ ls .hg/store
486 $ ls .hg/store
529 00changelog.i
487 00changelog.i
530 00manifest.i
488 00manifest.i
531 data
489 data
532 fncache
490 fncache
533 phaseroots
491 phaseroots
534 undo
492 undo
535 undo.backupfiles
493 undo.backupfiles
536 undo.phaseroots
494 undo.phaseroots
537
495
538 manifest should be generaldelta
496 manifest should be generaldelta
539
497
540 $ hg debugrevlog -m | grep flags
498 $ hg debugrevlog -m | grep flags
541 flags : inline, generaldelta
499 flags : inline, generaldelta
542
500
543 verify should be happy
501 verify should be happy
544
502
545 $ hg verify
503 $ hg verify
546 checking changesets
504 checking changesets
547 checking manifests
505 checking manifests
548 crosschecking files in changesets and manifests
506 crosschecking files in changesets and manifests
549 checking files
507 checking files
550 checked 3 changesets with 3 changes to 3 files
508 checked 3 changesets with 3 changes to 3 files
551
509
552 old store should be backed up
510 old store should be backed up
553
511
554 $ ls -d .hg/upgradebackup.*/
512 $ ls -d .hg/upgradebackup.*/
555 .hg/upgradebackup.*/ (glob)
513 .hg/upgradebackup.*/ (glob)
556 $ ls .hg/upgradebackup.*/store
514 $ ls .hg/upgradebackup.*/store
557 00changelog.i
515 00changelog.i
558 00manifest.i
516 00manifest.i
559 data
517 data
560 fncache
518 fncache
561 phaseroots
519 phaseroots
562 undo
520 undo
563 undo.backup.fncache
521 undo.backup.fncache
564 undo.backupfiles
522 undo.backupfiles
565 undo.phaseroots
523 undo.phaseroots
566
524
567 unless --no-backup is passed
525 unless --no-backup is passed
568
526
569 $ rm -rf .hg/upgradebackup.*/
527 $ rm -rf .hg/upgradebackup.*/
570 $ hg debugupgraderepo --run --no-backup
528 $ hg debugupgraderepo --run --no-backup
571 upgrade will perform the following actions:
529 upgrade will perform the following actions:
572
530
573 requirements
531 requirements
574 preserved: dotencode, fncache, generaldelta, revlogv1, store
532 preserved: dotencode, fncache, generaldelta, revlogv1, store
575 added: sparserevlog
533 added: sparserevlog
576
534
577 sparserevlog
535 sparserevlog
578 Revlogs support delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
536 Revlogs support delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
579
537
580 sidedata
581 Allows storage of extra data alongside a revision.
582
583 copies-sdc
584 Allows using a more efficient algorithm to deal with copy tracing.
585
586 beginning upgrade...
538 beginning upgrade...
587 repository locked and read-only
539 repository locked and read-only
588 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
540 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
589 (it is safe to interrupt this process any time before data migration completes)
541 (it is safe to interrupt this process any time before data migration completes)
590 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
542 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
591 migrating 519 KB in store; 1.05 MB tracked data
543 migrating 519 KB in store; 1.05 MB tracked data
592 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
544 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
593 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
545 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
594 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
546 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
595 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
547 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
596 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
548 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
597 finished migrating 3 changelog revisions; change in size: 0 bytes
549 finished migrating 3 changelog revisions; change in size: 0 bytes
598 finished migrating 9 total revisions; total change in store size: 0 bytes
550 finished migrating 9 total revisions; total change in store size: 0 bytes
599 copying phaseroots
551 copying phaseroots
600 data fully migrated to temporary repository
552 data fully migrated to temporary repository
601 marking source repository as being upgraded; clients will be unable to read from repository
553 marking source repository as being upgraded; clients will be unable to read from repository
602 starting in-place swap of repository data
554 starting in-place swap of repository data
603 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
555 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
604 replacing store...
556 replacing store...
605 store replacement complete; repository was inconsistent for * (glob)
557 store replacement complete; repository was inconsistent for * (glob)
606 finalizing requirements file and making repository readable again
558 finalizing requirements file and making repository readable again
607 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
559 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
608 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
560 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
609 $ ls -1 .hg/ | grep upgradebackup
561 $ ls -1 .hg/ | grep upgradebackup
610 [1]
562 [1]
611
563
612 We can restrict optimization to some revlogs:
564 We can restrict optimization to some revlogs:
613
565
614 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
566 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
615 upgrade will perform the following actions:
567 upgrade will perform the following actions:
616
568
617 requirements
569 requirements
618 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
570 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
619
571
620 sidedata
621 Allows storage of extra data alongside a revision.
622
623 copies-sdc
624 Allows using a more efficient algorithm to deal with copy tracing.
625
626 re-delta-parent
572 re-delta-parent
627 deltas within internal storage will choose a new base revision if needed
573 deltas within internal storage will choose a new base revision if needed
628
574
629 beginning upgrade...
575 beginning upgrade...
630 repository locked and read-only
576 repository locked and read-only
631 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
577 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
632 (it is safe to interrupt this process any time before data migration completes)
578 (it is safe to interrupt this process any time before data migration completes)
633 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
579 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
634 migrating 519 KB in store; 1.05 MB tracked data
580 migrating 519 KB in store; 1.05 MB tracked data
635 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
581 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
636 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
582 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
637 blindly copying data/f0.i containing 1 revisions
583 blindly copying data/f0.i containing 1 revisions
638 blindly copying data/f2.i containing 1 revisions
584 blindly copying data/f2.i containing 1 revisions
639 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
585 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
640 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
586 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
641 cloning 3 revisions from 00manifest.i
587 cloning 3 revisions from 00manifest.i
642 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
588 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
643 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
589 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
644 blindly copying 00changelog.i containing 3 revisions
590 blindly copying 00changelog.i containing 3 revisions
645 finished migrating 3 changelog revisions; change in size: 0 bytes
591 finished migrating 3 changelog revisions; change in size: 0 bytes
646 finished migrating 9 total revisions; total change in store size: 0 bytes
592 finished migrating 9 total revisions; total change in store size: 0 bytes
647 copying phaseroots
593 copying phaseroots
648 data fully migrated to temporary repository
594 data fully migrated to temporary repository
649 marking source repository as being upgraded; clients will be unable to read from repository
595 marking source repository as being upgraded; clients will be unable to read from repository
650 starting in-place swap of repository data
596 starting in-place swap of repository data
651 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
597 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
652 replacing store...
598 replacing store...
653 store replacement complete; repository was inconsistent for *s (glob)
599 store replacement complete; repository was inconsistent for *s (glob)
654 finalizing requirements file and making repository readable again
600 finalizing requirements file and making repository readable again
655 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
601 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
656 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
602 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
657
603
658 Check that the repo still works fine
604 Check that the repo still works fine
659
605
660 $ hg log -G --stat
606 $ hg log -G --stat
661 @ changeset: 2:76d4395f5413 (no-py3 !)
607 @ changeset: 2:76d4395f5413 (no-py3 !)
662 @ changeset: 2:fca376863211 (py3 !)
608 @ changeset: 2:fca376863211 (py3 !)
663 | tag: tip
609 | tag: tip
664 | parent: 0:ba592bf28da2
610 | parent: 0:ba592bf28da2
665 | user: test
611 | user: test
666 | date: Thu Jan 01 00:00:00 1970 +0000
612 | date: Thu Jan 01 00:00:00 1970 +0000
667 | summary: add f2
613 | summary: add f2
668 |
614 |
669 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
615 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
670 | 1 files changed, 100000 insertions(+), 0 deletions(-)
616 | 1 files changed, 100000 insertions(+), 0 deletions(-)
671 |
617 |
672 | o changeset: 1:2029ce2354e2
618 | o changeset: 1:2029ce2354e2
673 |/ user: test
619 |/ user: test
674 | date: Thu Jan 01 00:00:00 1970 +0000
620 | date: Thu Jan 01 00:00:00 1970 +0000
675 | summary: add f1
621 | summary: add f1
676 |
622 |
677 |
623 |
678 o changeset: 0:ba592bf28da2
624 o changeset: 0:ba592bf28da2
679 user: test
625 user: test
680 date: Thu Jan 01 00:00:00 1970 +0000
626 date: Thu Jan 01 00:00:00 1970 +0000
681 summary: initial
627 summary: initial
682
628
683
629
684
630
685 $ hg verify
631 $ hg verify
686 checking changesets
632 checking changesets
687 checking manifests
633 checking manifests
688 crosschecking files in changesets and manifests
634 crosschecking files in changesets and manifests
689 checking files
635 checking files
690 checked 3 changesets with 3 changes to 3 files
636 checked 3 changesets with 3 changes to 3 files
691
637
692 Check we can select negatively
638 Check we can select negatively
693
639
694 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
640 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
695 upgrade will perform the following actions:
641 upgrade will perform the following actions:
696
642
697 requirements
643 requirements
698 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
644 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
699
645
700 sidedata
701 Allows storage of extra data alongside a revision.
702
703 copies-sdc
704 Allows using a more efficient algorithm to deal with copy tracing.
705
706 re-delta-parent
646 re-delta-parent
707 deltas within internal storage will choose a new base revision if needed
647 deltas within internal storage will choose a new base revision if needed
708
648
709 beginning upgrade...
649 beginning upgrade...
710 repository locked and read-only
650 repository locked and read-only
711 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
651 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
712 (it is safe to interrupt this process any time before data migration completes)
652 (it is safe to interrupt this process any time before data migration completes)
713 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
653 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
714 migrating 519 KB in store; 1.05 MB tracked data
654 migrating 519 KB in store; 1.05 MB tracked data
715 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
655 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
716 cloning 1 revisions from data/FooBarDirectory.d/f1.i
656 cloning 1 revisions from data/FooBarDirectory.d/f1.i
717 cloning 1 revisions from data/f0.i
657 cloning 1 revisions from data/f0.i
718 cloning 1 revisions from data/f2.i
658 cloning 1 revisions from data/f2.i
719 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
659 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
720 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
660 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
721 blindly copying 00manifest.i containing 3 revisions
661 blindly copying 00manifest.i containing 3 revisions
722 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
662 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
723 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
663 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
724 cloning 3 revisions from 00changelog.i
664 cloning 3 revisions from 00changelog.i
725 finished migrating 3 changelog revisions; change in size: 0 bytes
665 finished migrating 3 changelog revisions; change in size: 0 bytes
726 finished migrating 9 total revisions; total change in store size: 0 bytes
666 finished migrating 9 total revisions; total change in store size: 0 bytes
727 copying phaseroots
667 copying phaseroots
728 data fully migrated to temporary repository
668 data fully migrated to temporary repository
729 marking source repository as being upgraded; clients will be unable to read from repository
669 marking source repository as being upgraded; clients will be unable to read from repository
730 starting in-place swap of repository data
670 starting in-place swap of repository data
731 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
671 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
732 replacing store...
672 replacing store...
733 store replacement complete; repository was inconsistent for *s (glob)
673 store replacement complete; repository was inconsistent for *s (glob)
734 finalizing requirements file and making repository readable again
674 finalizing requirements file and making repository readable again
735 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
675 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
736 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
676 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
737 $ hg verify
677 $ hg verify
738 checking changesets
678 checking changesets
739 checking manifests
679 checking manifests
740 crosschecking files in changesets and manifests
680 crosschecking files in changesets and manifests
741 checking files
681 checking files
742 checked 3 changesets with 3 changes to 3 files
682 checked 3 changesets with 3 changes to 3 files
743
683
744 Check that we can select changelog only
684 Check that we can select changelog only
745
685
746 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
686 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
747 upgrade will perform the following actions:
687 upgrade will perform the following actions:
748
688
749 requirements
689 requirements
750 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
690 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
751
691
752 sidedata
753 Allows storage of extra data alongside a revision.
754
755 copies-sdc
756 Allows using a more efficient algorithm to deal with copy tracing.
757
758 re-delta-parent
692 re-delta-parent
759 deltas within internal storage will choose a new base revision if needed
693 deltas within internal storage will choose a new base revision if needed
760
694
761 beginning upgrade...
695 beginning upgrade...
762 repository locked and read-only
696 repository locked and read-only
763 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
697 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
764 (it is safe to interrupt this process any time before data migration completes)
698 (it is safe to interrupt this process any time before data migration completes)
765 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
699 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
766 migrating 519 KB in store; 1.05 MB tracked data
700 migrating 519 KB in store; 1.05 MB tracked data
767 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
701 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
768 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
702 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
769 blindly copying data/f0.i containing 1 revisions
703 blindly copying data/f0.i containing 1 revisions
770 blindly copying data/f2.i containing 1 revisions
704 blindly copying data/f2.i containing 1 revisions
771 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
705 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
772 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
706 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
773 blindly copying 00manifest.i containing 3 revisions
707 blindly copying 00manifest.i containing 3 revisions
774 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
708 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
775 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
709 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
776 cloning 3 revisions from 00changelog.i
710 cloning 3 revisions from 00changelog.i
777 finished migrating 3 changelog revisions; change in size: 0 bytes
711 finished migrating 3 changelog revisions; change in size: 0 bytes
778 finished migrating 9 total revisions; total change in store size: 0 bytes
712 finished migrating 9 total revisions; total change in store size: 0 bytes
779 copying phaseroots
713 copying phaseroots
780 data fully migrated to temporary repository
714 data fully migrated to temporary repository
781 marking source repository as being upgraded; clients will be unable to read from repository
715 marking source repository as being upgraded; clients will be unable to read from repository
782 starting in-place swap of repository data
716 starting in-place swap of repository data
783 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
717 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
784 replacing store...
718 replacing store...
785 store replacement complete; repository was inconsistent for *s (glob)
719 store replacement complete; repository was inconsistent for *s (glob)
786 finalizing requirements file and making repository readable again
720 finalizing requirements file and making repository readable again
787 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
721 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
788 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
722 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
789 $ hg verify
723 $ hg verify
790 checking changesets
724 checking changesets
791 checking manifests
725 checking manifests
792 crosschecking files in changesets and manifests
726 crosschecking files in changesets and manifests
793 checking files
727 checking files
794 checked 3 changesets with 3 changes to 3 files
728 checked 3 changesets with 3 changes to 3 files
795
729
796 Check that we can select filelog only
730 Check that we can select filelog only
797
731
798 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
732 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
799 upgrade will perform the following actions:
733 upgrade will perform the following actions:
800
734
801 requirements
735 requirements
802 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
736 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
803
737
804 sidedata
805 Allows storage of extra data alongside a revision.
806
807 copies-sdc
808 Allows using a more efficient algorithm to deal with copy tracing.
809
810 re-delta-parent
738 re-delta-parent
811 deltas within internal storage will choose a new base revision if needed
739 deltas within internal storage will choose a new base revision if needed
812
740
813 beginning upgrade...
741 beginning upgrade...
814 repository locked and read-only
742 repository locked and read-only
815 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
743 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
816 (it is safe to interrupt this process any time before data migration completes)
744 (it is safe to interrupt this process any time before data migration completes)
817 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
745 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
818 migrating 519 KB in store; 1.05 MB tracked data
746 migrating 519 KB in store; 1.05 MB tracked data
819 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
747 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
820 cloning 1 revisions from data/FooBarDirectory.d/f1.i
748 cloning 1 revisions from data/FooBarDirectory.d/f1.i
821 cloning 1 revisions from data/f0.i
749 cloning 1 revisions from data/f0.i
822 cloning 1 revisions from data/f2.i
750 cloning 1 revisions from data/f2.i
823 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
751 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
824 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
752 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
825 blindly copying 00manifest.i containing 3 revisions
753 blindly copying 00manifest.i containing 3 revisions
826 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
754 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
827 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
755 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
828 blindly copying 00changelog.i containing 3 revisions
756 blindly copying 00changelog.i containing 3 revisions
829 finished migrating 3 changelog revisions; change in size: 0 bytes
757 finished migrating 3 changelog revisions; change in size: 0 bytes
830 finished migrating 9 total revisions; total change in store size: 0 bytes
758 finished migrating 9 total revisions; total change in store size: 0 bytes
831 copying phaseroots
759 copying phaseroots
832 data fully migrated to temporary repository
760 data fully migrated to temporary repository
833 marking source repository as being upgraded; clients will be unable to read from repository
761 marking source repository as being upgraded; clients will be unable to read from repository
834 starting in-place swap of repository data
762 starting in-place swap of repository data
835 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
763 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
836 replacing store...
764 replacing store...
837 store replacement complete; repository was inconsistent for *s (glob)
765 store replacement complete; repository was inconsistent for *s (glob)
838 finalizing requirements file and making repository readable again
766 finalizing requirements file and making repository readable again
839 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
767 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
840 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
768 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
841 $ hg verify
769 $ hg verify
842 checking changesets
770 checking changesets
843 checking manifests
771 checking manifests
844 crosschecking files in changesets and manifests
772 crosschecking files in changesets and manifests
845 checking files
773 checking files
846 checked 3 changesets with 3 changes to 3 files
774 checked 3 changesets with 3 changes to 3 files
847
775
848
776
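Taken together, the four runs above exercise the revlog selection flags: whichever revlogs are selected get re-deltified ("cloning ... revisions"), everything else is copied untouched ("blindly copying"). A condensed sketch of that decision (hypothetical helper, not the upgrade code itself):

    def plan_revlog_work(path, changelog=False, manifest=False, filelogs=False):
        # path is a store-relative revlog path such as "00changelog.i",
        # "00manifest.i" or "data/f0.i"
        if path == "00changelog.i":
            selected = changelog
        elif path == "00manifest.i":
            selected = manifest
        else:
            selected = filelogs
        return "re-delta (clone revisions)" if selected else "blind copy"

    # the last run used --no-changelog --no-manifest, i.e. filelogs only:
    for p in ("00changelog.i", "00manifest.i", "data/f0.i"):
        print(p, "->", plan_revlog_work(p, filelogs=True))
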
849 Check you can't skip revlog clone during important format downgrade
777 Check you can't skip revlog clone during important format downgrade
850
778
851 $ echo "[format]" > .hg/hgrc
779 $ echo "[format]" > .hg/hgrc
852 $ echo "sparse-revlog=no" >> .hg/hgrc
780 $ echo "sparse-revlog=no" >> .hg/hgrc
853 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
781 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
854 ignoring revlogs selection flags, format requirements change: sparserevlog
782 ignoring revlogs selection flags, format requirements change: sparserevlog
855 upgrade will perform the following actions:
783 upgrade will perform the following actions:
856
784
857 requirements
785 requirements
858 preserved: dotencode, fncache, generaldelta, revlogv1, store
786 preserved: dotencode, fncache, generaldelta, revlogv1, store
859 removed: sparserevlog
787 removed: sparserevlog
860
788
861 sidedata
862 Allows storage of extra data alongside a revision.
863
864 copies-sdc
865 Allows using a more efficient algorithm to deal with copy tracing.
866
867 re-delta-parent
789 re-delta-parent
868 deltas within internal storage will choose a new base revision if needed
790 deltas within internal storage will choose a new base revision if needed
869
791
870 beginning upgrade...
792 beginning upgrade...
871 repository locked and read-only
793 repository locked and read-only
872 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
794 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
873 (it is safe to interrupt this process any time before data migration completes)
795 (it is safe to interrupt this process any time before data migration completes)
874 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
796 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
875 migrating 519 KB in store; 1.05 MB tracked data
797 migrating 519 KB in store; 1.05 MB tracked data
876 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
798 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
877 cloning 1 revisions from data/FooBarDirectory.d/f1.i
799 cloning 1 revisions from data/FooBarDirectory.d/f1.i
878 cloning 1 revisions from data/f0.i
800 cloning 1 revisions from data/f0.i
879 cloning 1 revisions from data/f2.i
801 cloning 1 revisions from data/f2.i
880 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
802 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
881 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
803 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
882 cloning 3 revisions from 00manifest.i
804 cloning 3 revisions from 00manifest.i
883 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
805 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
884 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
806 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
885 cloning 3 revisions from 00changelog.i
807 cloning 3 revisions from 00changelog.i
886 finished migrating 3 changelog revisions; change in size: 0 bytes
808 finished migrating 3 changelog revisions; change in size: 0 bytes
887 finished migrating 9 total revisions; total change in store size: 0 bytes
809 finished migrating 9 total revisions; total change in store size: 0 bytes
888 copying phaseroots
810 copying phaseroots
889 data fully migrated to temporary repository
811 data fully migrated to temporary repository
890 marking source repository as being upgraded; clients will be unable to read from repository
812 marking source repository as being upgraded; clients will be unable to read from repository
891 starting in-place swap of repository data
813 starting in-place swap of repository data
892 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
814 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
893 replacing store...
815 replacing store...
894 store replacement complete; repository was inconsistent for *s (glob)
816 store replacement complete; repository was inconsistent for *s (glob)
895 finalizing requirements file and making repository readable again
817 finalizing requirements file and making repository readable again
896 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
818 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
897 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
819 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
898 $ hg verify
820 $ hg verify
899 checking changesets
821 checking changesets
900 checking manifests
822 checking manifests
901 crosschecking files in changesets and manifests
823 crosschecking files in changesets and manifests
902 checking files
824 checking files
903 checked 3 changesets with 3 changes to 3 files
825 checked 3 changesets with 3 changes to 3 files
904
826
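The "ignoring revlogs selection flags" line above is exactly the behaviour this test is after: when a requirement that demands a full re-clone (such as generaldelta or sparserevlog) is added or removed, the selection flags cannot be honoured and every revlog is converted. A rough sketch of that rule (hypothetical helper, not the real upgrade code):

    RECLONE_REQUIREMENTS = {"generaldelta", "sparserevlog"}

    def honour_selection_flags(old_reqs, new_reqs):
        # any change, in either direction, to a re-clone requirement wins
        changed = (old_reqs ^ new_reqs) & RECLONE_REQUIREMENTS
        if changed:
            print("ignoring revlogs selection flags, format requirements "
                  "change: %s" % ", ".join(sorted(changed)))
            return False
        return True

    # downgrade example from above: sparserevlog is being removed
    honour_selection_flags({"generaldelta", "sparserevlog"}, {"generaldelta"})
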
905 Check you can't skip revlog clone during important format upgrade
827 Check you can't skip revlog clone during important format upgrade
906
828
907 $ echo "sparse-revlog=yes" >> .hg/hgrc
829 $ echo "sparse-revlog=yes" >> .hg/hgrc
908 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
830 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
909 ignoring revlogs selection flags, format requirements change: sparserevlog
831 ignoring revlogs selection flags, format requirements change: sparserevlog
910 upgrade will perform the following actions:
832 upgrade will perform the following actions:
911
833
912 requirements
834 requirements
913 preserved: dotencode, fncache, generaldelta, revlogv1, store
835 preserved: dotencode, fncache, generaldelta, revlogv1, store
914 added: sparserevlog
836 added: sparserevlog
915
837
916 sparserevlog
838 sparserevlog
917 Revlogs support delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
839 Revlogs support delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
918
840
919 sidedata
920 Allows storage of extra data alongside a revision.
921
922 copies-sdc
923 Allows using a more efficient algorithm to deal with copy tracing.
924
925 re-delta-parent
841 re-delta-parent
926 deltas within internal storage will choose a new base revision if needed
842 deltas within internal storage will choose a new base revision if needed
927
843
928 beginning upgrade...
844 beginning upgrade...
929 repository locked and read-only
845 repository locked and read-only
930 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
846 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
931 (it is safe to interrupt this process any time before data migration completes)
847 (it is safe to interrupt this process any time before data migration completes)
932 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
848 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
933 migrating 519 KB in store; 1.05 MB tracked data
849 migrating 519 KB in store; 1.05 MB tracked data
934 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
850 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
935 cloning 1 revisions from data/FooBarDirectory.d/f1.i
851 cloning 1 revisions from data/FooBarDirectory.d/f1.i
936 cloning 1 revisions from data/f0.i
852 cloning 1 revisions from data/f0.i
937 cloning 1 revisions from data/f2.i
853 cloning 1 revisions from data/f2.i
938 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
854 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
939 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
855 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
940 cloning 3 revisions from 00manifest.i
856 cloning 3 revisions from 00manifest.i
941 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
857 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
942 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
858 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
943 cloning 3 revisions from 00changelog.i
859 cloning 3 revisions from 00changelog.i
944 finished migrating 3 changelog revisions; change in size: 0 bytes
860 finished migrating 3 changelog revisions; change in size: 0 bytes
945 finished migrating 9 total revisions; total change in store size: 0 bytes
861 finished migrating 9 total revisions; total change in store size: 0 bytes
946 copying phaseroots
862 copying phaseroots
947 data fully migrated to temporary repository
863 data fully migrated to temporary repository
948 marking source repository as being upgraded; clients will be unable to read from repository
864 marking source repository as being upgraded; clients will be unable to read from repository
949 starting in-place swap of repository data
865 starting in-place swap of repository data
950 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
866 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
951 replacing store...
867 replacing store...
952 store replacement complete; repository was inconsistent for *s (glob)
868 store replacement complete; repository was inconsistent for *s (glob)
953 finalizing requirements file and making repository readable again
869 finalizing requirements file and making repository readable again
954 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
870 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
955 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
871 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
956 $ hg verify
872 $ hg verify
957 checking changesets
873 checking changesets
958 checking manifests
874 checking manifests
959 crosschecking files in changesets and manifests
875 crosschecking files in changesets and manifests
960 checking files
876 checking files
961 checked 3 changesets with 3 changes to 3 files
877 checked 3 changesets with 3 changes to 3 files
962
878
963 $ cd ..
879 $ cd ..
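
For anyone scripting the flow shown above, the pairing of `hg debugupgraderepo --run` with a follow-up `hg verify` is the whole safety net. A minimal Python sketch of that pairing, assuming `hg` is on PATH and taking the repository path as a parameter (the helper name is illustrative, not part of this change):

    import subprocess

    def upgrade_and_verify(repo_path):
        # Run the in-place upgrade, then verify the upgraded store.
        upgrade = subprocess.run(
            ["hg", "-R", repo_path, "debugupgraderepo", "--run"],
            capture_output=True, text=True,
        )
        if upgrade.returncode != 0:
            return False
        verify = subprocess.run(
            ["hg", "-R", repo_path, "verify"],
            capture_output=True, text=True,
        )
        return verify.returncode == 0

Both commands exit non-zero on failure, so checking the return codes is enough for a smoke test; the detailed progress lines are the ones reproduced in the transcript above.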
964
880
965 store files with special filenames aren't encoded during copy
881 store files with special filenames aren't encoded during copy
966
882
967 $ hg init store-filenames
883 $ hg init store-filenames
968 $ cd store-filenames
884 $ cd store-filenames
969 $ touch foo
885 $ touch foo
970 $ hg -q commit -A -m initial
886 $ hg -q commit -A -m initial
971 $ touch .hg/store/.XX_special_filename
887 $ touch .hg/store/.XX_special_filename
972
888
973 $ hg debugupgraderepo --run
889 $ hg debugupgraderepo --run
974 upgrade will perform the following actions:
890 upgrade will perform the following actions:
975
891
976 requirements
892 requirements
977 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
893 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
978
894
979 sidedata
980 Allows storage of extra data alongside a revision.
981
982 copies-sdc
983 Allows to use more efficient algorithm to deal with copy tracing.
984
985 beginning upgrade...
895 beginning upgrade...
986 repository locked and read-only
896 repository locked and read-only
987 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
897 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
988 (it is safe to interrupt this process any time before data migration completes)
898 (it is safe to interrupt this process any time before data migration completes)
989 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
899 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
990 migrating 301 bytes in store; 107 bytes tracked data
900 migrating 301 bytes in store; 107 bytes tracked data
991 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
901 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
992 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
902 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
993 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
903 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
994 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
904 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
995 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
905 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
996 finished migrating 1 changelog revisions; change in size: 0 bytes
906 finished migrating 1 changelog revisions; change in size: 0 bytes
997 finished migrating 3 total revisions; total change in store size: 0 bytes
907 finished migrating 3 total revisions; total change in store size: 0 bytes
998 copying .XX_special_filename
908 copying .XX_special_filename
999 copying phaseroots
909 copying phaseroots
1000 data fully migrated to temporary repository
910 data fully migrated to temporary repository
1001 marking source repository as being upgraded; clients will be unable to read from repository
911 marking source repository as being upgraded; clients will be unable to read from repository
1002 starting in-place swap of repository data
912 starting in-place swap of repository data
1003 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
913 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1004 replacing store...
914 replacing store...
1005 store replacement complete; repository was inconsistent for *s (glob)
915 store replacement complete; repository was inconsistent for *s (glob)
1006 finalizing requirements file and making repository readable again
916 finalizing requirements file and making repository readable again
1007 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
917 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1008 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
918 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1009 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
919 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1010 $ hg debugupgraderepo --run --optimize redeltafulladd
920 $ hg debugupgraderepo --run --optimize redeltafulladd
1011 upgrade will perform the following actions:
921 upgrade will perform the following actions:
1012
922
1013 requirements
923 requirements
1014 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
924 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1015
925
1016 sidedata
1017 Allows storage of extra data alongside a revision.
1018
1019 copies-sdc
1020 Allows to use more efficient algorithm to deal with copy tracing.
1021
1022 re-delta-fulladd
926 re-delta-fulladd
1023 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
927 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1024
928
1025 beginning upgrade...
929 beginning upgrade...
1026 repository locked and read-only
930 repository locked and read-only
1027 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
931 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1028 (it is safe to interrupt this process any time before data migration completes)
932 (it is safe to interrupt this process any time before data migration completes)
1029 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
933 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1030 migrating 301 bytes in store; 107 bytes tracked data
934 migrating 301 bytes in store; 107 bytes tracked data
1031 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
935 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1032 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
936 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1033 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
937 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1034 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
938 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1035 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
939 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1036 finished migrating 1 changelog revisions; change in size: 0 bytes
940 finished migrating 1 changelog revisions; change in size: 0 bytes
1037 finished migrating 3 total revisions; total change in store size: 0 bytes
941 finished migrating 3 total revisions; total change in store size: 0 bytes
1038 copying .XX_special_filename
942 copying .XX_special_filename
1039 copying phaseroots
943 copying phaseroots
1040 data fully migrated to temporary repository
944 data fully migrated to temporary repository
1041 marking source repository as being upgraded; clients will be unable to read from repository
945 marking source repository as being upgraded; clients will be unable to read from repository
1042 starting in-place swap of repository data
946 starting in-place swap of repository data
1043 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
947 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1044 replacing store...
948 replacing store...
1045 store replacement complete; repository was inconsistent for *s (glob)
949 store replacement complete; repository was inconsistent for *s (glob)
1046 finalizing requirements file and making repository readable again
950 finalizing requirements file and making repository readable again
1047 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
951 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1048 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
952 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1049 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
953 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1050
954
1051 fncache is valid after upgrade
955 fncache is valid after upgrade
1052
956
1053 $ hg debugrebuildfncache
957 $ hg debugrebuildfncache
1054 fncache already up to date
958 fncache already up to date
1055
959
1056 $ cd ..
960 $ cd ..
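
The `--optimize redeltafulladd` pass re-adds every revision to the new store, so the test double-checks the fncache afterwards with `hg debugrebuildfncache`. The same check can be driven from a script; a hedged sketch, with the optimization name taken from the run above and the helper name being illustrative:

    import subprocess

    def optimize_and_check_fncache(repo_path, optimization="redeltafulladd"):
        # Re-store every revision with the requested optimization, then make
        # sure the fncache still matches the migrated store.
        subprocess.run(
            ["hg", "-R", repo_path, "debugupgraderepo", "--run",
             "--optimize", optimization],
            check=True, capture_output=True,
        )
        result = subprocess.run(
            ["hg", "-R", repo_path, "debugrebuildfncache"],
            capture_output=True, text=True, check=True,
        )
        # "fncache already up to date" means nothing needed rebuilding.
        return "already up to date" in result.stdout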
1057
961
1058 Check upgrading a large file repository
962 Check upgrading a large file repository
1059 ---------------------------------------
963 ---------------------------------------
1060
964
1061 $ hg init largefilesrepo
965 $ hg init largefilesrepo
1062 $ cat << EOF >> largefilesrepo/.hg/hgrc
966 $ cat << EOF >> largefilesrepo/.hg/hgrc
1063 > [extensions]
967 > [extensions]
1064 > largefiles =
968 > largefiles =
1065 > EOF
969 > EOF
1066
970
1067 $ cd largefilesrepo
971 $ cd largefilesrepo
1068 $ touch foo
972 $ touch foo
1069 $ hg add --large foo
973 $ hg add --large foo
1070 $ hg -q commit -m initial
974 $ hg -q commit -m initial
1071 $ cat .hg/requires
975 $ cat .hg/requires
1072 dotencode
976 dotencode
1073 fncache
977 fncache
1074 generaldelta
978 generaldelta
1075 largefiles
979 largefiles
1076 revlogv1
980 revlogv1
1077 sparserevlog
981 sparserevlog
1078 store
982 store
1079
983
1080 $ hg debugupgraderepo --run
984 $ hg debugupgraderepo --run
1081 upgrade will perform the following actions:
985 upgrade will perform the following actions:
1082
986
1083 requirements
987 requirements
1084 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
988 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
1085
989
1086 sidedata
1087 Allows storage of extra data alongside a revision.
1088
1089 copies-sdc
1090 Allows to use more efficient algorithm to deal with copy tracing.
1091
1092 beginning upgrade...
990 beginning upgrade...
1093 repository locked and read-only
991 repository locked and read-only
1094 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
992 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1095 (it is safe to interrupt this process any time before data migration completes)
993 (it is safe to interrupt this process any time before data migration completes)
1096 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
994 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1097 migrating 355 bytes in store; 160 bytes tracked data
995 migrating 355 bytes in store; 160 bytes tracked data
1098 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
996 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
1099 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
997 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1100 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
998 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
1101 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
999 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1102 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
1000 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
1103 finished migrating 1 changelog revisions; change in size: 0 bytes
1001 finished migrating 1 changelog revisions; change in size: 0 bytes
1104 finished migrating 3 total revisions; total change in store size: 0 bytes
1002 finished migrating 3 total revisions; total change in store size: 0 bytes
1105 copying phaseroots
1003 copying phaseroots
1106 data fully migrated to temporary repository
1004 data fully migrated to temporary repository
1107 marking source repository as being upgraded; clients will be unable to read from repository
1005 marking source repository as being upgraded; clients will be unable to read from repository
1108 starting in-place swap of repository data
1006 starting in-place swap of repository data
1109 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1007 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1110 replacing store...
1008 replacing store...
1111 store replacement complete; repository was inconsistent for *s (glob)
1009 store replacement complete; repository was inconsistent for *s (glob)
1112 finalizing requirements file and making repository readable again
1010 finalizing requirements file and making repository readable again
1113 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1011 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1114 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1012 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1115 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1013 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1116 $ cat .hg/requires
1014 $ cat .hg/requires
1117 dotencode
1015 dotencode
1118 fncache
1016 fncache
1119 generaldelta
1017 generaldelta
1120 largefiles
1018 largefiles
1121 revlogv1
1019 revlogv1
1122 sparserevlog
1020 sparserevlog
1123 store
1021 store
1124
1022
1125 $ cat << EOF >> .hg/hgrc
1023 $ cat << EOF >> .hg/hgrc
1126 > [extensions]
1024 > [extensions]
1127 > lfs =
1025 > lfs =
1128 > [lfs]
1026 > [lfs]
1129 > threshold = 10
1027 > threshold = 10
1130 > EOF
1028 > EOF
1131 $ echo '123456789012345' > lfs.bin
1029 $ echo '123456789012345' > lfs.bin
1132 $ hg ci -Am 'lfs.bin'
1030 $ hg ci -Am 'lfs.bin'
1133 adding lfs.bin
1031 adding lfs.bin
1134 $ grep lfs .hg/requires
1032 $ grep lfs .hg/requires
1135 lfs
1033 lfs
1136 $ find .hg/store/lfs -type f
1034 $ find .hg/store/lfs -type f
1137 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1035 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1138
1036
1139 $ hg debugupgraderepo --run
1037 $ hg debugupgraderepo --run
1140 upgrade will perform the following actions:
1038 upgrade will perform the following actions:
1141
1039
1142 requirements
1040 requirements
1143 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
1041 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
1144
1042
1145 sidedata
1146 Allows storage of extra data alongside a revision.
1147
1148 copies-sdc
1149 Allows to use more efficient algorithm to deal with copy tracing.
1150
1151 beginning upgrade...
1043 beginning upgrade...
1152 repository locked and read-only
1044 repository locked and read-only
1153 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1045 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1154 (it is safe to interrupt this process any time before data migration completes)
1046 (it is safe to interrupt this process any time before data migration completes)
1155 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
1047 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
1156 migrating 801 bytes in store; 467 bytes tracked data
1048 migrating 801 bytes in store; 467 bytes tracked data
1157 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
1049 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
1158 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
1050 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
1159 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
1051 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
1160 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
1052 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
1161 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
1053 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
1162 finished migrating 2 changelog revisions; change in size: 0 bytes
1054 finished migrating 2 changelog revisions; change in size: 0 bytes
1163 finished migrating 6 total revisions; total change in store size: 0 bytes
1055 finished migrating 6 total revisions; total change in store size: 0 bytes
1164 copying phaseroots
1056 copying phaseroots
1165 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1057 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1166 data fully migrated to temporary repository
1058 data fully migrated to temporary repository
1167 marking source repository as being upgraded; clients will be unable to read from repository
1059 marking source repository as being upgraded; clients will be unable to read from repository
1168 starting in-place swap of repository data
1060 starting in-place swap of repository data
1169 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1061 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1170 replacing store...
1062 replacing store...
1171 store replacement complete; repository was inconsistent for *s (glob)
1063 store replacement complete; repository was inconsistent for *s (glob)
1172 finalizing requirements file and making repository readable again
1064 finalizing requirements file and making repository readable again
1173 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1065 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1174 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1066 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1175 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1067 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1176
1068
1177 $ grep lfs .hg/requires
1069 $ grep lfs .hg/requires
1178 lfs
1070 lfs
1179 $ find .hg/store/lfs -type f
1071 $ find .hg/store/lfs -type f
1180 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1072 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1181 $ hg verify
1073 $ hg verify
1182 checking changesets
1074 checking changesets
1183 checking manifests
1075 checking manifests
1184 crosschecking files in changesets and manifests
1076 crosschecking files in changesets and manifests
1185 checking files
1077 checking files
1186 checked 2 changesets with 2 changes to 2 files
1078 checked 2 changesets with 2 changes to 2 files
1187 $ hg debugdata lfs.bin 0
1079 $ hg debugdata lfs.bin 0
1188 version https://git-lfs.github.com/spec/v1
1080 version https://git-lfs.github.com/spec/v1
1189 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1081 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1190 size 16
1082 size 16
1191 x-is-binary 0
1083 x-is-binary 0
1192
1084
1193 $ cd ..
1085 $ cd ..
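
Because the `largefiles` and `lfs` requirements have to survive the upgrade (the transcript greps `.hg/requires` before and after), the same comparison is easy to automate. A small sketch, assuming the requirements file sits at `.hg/requires` as it does for the repositories in this test:

    def read_requires(repo_path):
        # .hg/requires is plain text, one requirement per line.
        with open(repo_path + "/.hg/requires") as fh:
            return {line.strip() for line in fh if line.strip()}

    def extension_requirements_preserved(before, after, names=("largefiles", "lfs")):
        # Format requirements may legitimately change across an upgrade, but
        # requirements added by extensions must still be present afterwards.
        return all(name in after for name in names if name in before)

Called with the sets read before and after `hg debugupgraderepo --run`, this mirrors the two `grep lfs .hg/requires` checks above.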
1194
1086
1195 repository config is taken into account
1087 repository config is taken into account
1196 -------------------------------------
1088 -------------------------------------
1197
1089
1198 $ cat << EOF >> $HGRCPATH
1090 $ cat << EOF >> $HGRCPATH
1199 > [format]
1091 > [format]
1200 > maxchainlen = 1
1092 > maxchainlen = 1
1201 > EOF
1093 > EOF
1202
1094
1203 $ hg init localconfig
1095 $ hg init localconfig
1204 $ cd localconfig
1096 $ cd localconfig
1205 $ cat << EOF > file
1097 $ cat << EOF > file
1206 > some content
1098 > some content
1207 > with some length
1099 > with some length
1208 > to make sure we get a delta
1100 > to make sure we get a delta
1209 > after changes
1101 > after changes
1210 > very long
1102 > very long
1211 > very long
1103 > very long
1212 > very long
1104 > very long
1213 > very long
1105 > very long
1214 > very long
1106 > very long
1215 > very long
1107 > very long
1216 > very long
1108 > very long
1217 > very long
1109 > very long
1218 > very long
1110 > very long
1219 > very long
1111 > very long
1220 > very long
1112 > very long
1221 > EOF
1113 > EOF
1222 $ hg -q commit -A -m A
1114 $ hg -q commit -A -m A
1223 $ echo "new line" >> file
1115 $ echo "new line" >> file
1224 $ hg -q commit -m B
1116 $ hg -q commit -m B
1225 $ echo "new line" >> file
1117 $ echo "new line" >> file
1226 $ hg -q commit -m C
1118 $ hg -q commit -m C
1227
1119
1228 $ cat << EOF >> .hg/hgrc
1120 $ cat << EOF >> .hg/hgrc
1229 > [format]
1121 > [format]
1230 > maxchainlen = 9001
1122 > maxchainlen = 9001
1231 > EOF
1123 > EOF
1232 $ hg config format
1124 $ hg config format
1233 format.maxchainlen=9001
1125 format.maxchainlen=9001
1234 $ hg debugdeltachain file
1126 $ hg debugdeltachain file
1235 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1127 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1236 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1128 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1237 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1129 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1238 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1130 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1239
1131
1240 $ hg debugupgraderepo --run --optimize redeltaall
1132 $ hg debugupgraderepo --run --optimize redeltaall
1241 upgrade will perform the following actions:
1133 upgrade will perform the following actions:
1242
1134
1243 requirements
1135 requirements
1244 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1136 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1245
1137
1246 sidedata
1247 Allows storage of extra data alongside a revision.
1248
1249 copies-sdc
1250 Allows to use more efficient algorithm to deal with copy tracing.
1251
1252 re-delta-all
1138 re-delta-all
1253 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1139 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1254
1140
1255 beginning upgrade...
1141 beginning upgrade...
1256 repository locked and read-only
1142 repository locked and read-only
1257 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1143 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1258 (it is safe to interrupt this process any time before data migration completes)
1144 (it is safe to interrupt this process any time before data migration completes)
1259 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1145 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1260 migrating 1019 bytes in store; 882 bytes tracked data
1146 migrating 1019 bytes in store; 882 bytes tracked data
1261 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1147 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1262 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1148 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1263 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1149 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1264 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1150 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1265 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1151 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1266 finished migrating 3 changelog revisions; change in size: 0 bytes
1152 finished migrating 3 changelog revisions; change in size: 0 bytes
1267 finished migrating 9 total revisions; total change in store size: -9 bytes
1153 finished migrating 9 total revisions; total change in store size: -9 bytes
1268 copying phaseroots
1154 copying phaseroots
1269 data fully migrated to temporary repository
1155 data fully migrated to temporary repository
1270 marking source repository as being upgraded; clients will be unable to read from repository
1156 marking source repository as being upgraded; clients will be unable to read from repository
1271 starting in-place swap of repository data
1157 starting in-place swap of repository data
1272 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1158 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1273 replacing store...
1159 replacing store...
1274 store replacement complete; repository was inconsistent for *s (glob)
1160 store replacement complete; repository was inconsistent for *s (glob)
1275 finalizing requirements file and making repository readable again
1161 finalizing requirements file and making repository readable again
1276 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1162 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1277 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1163 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1278 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1164 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1279 $ hg debugdeltachain file
1165 $ hg debugdeltachain file
1280 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1166 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1281 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1167 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1282 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1168 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1283 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1169 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1284 $ cd ..
1170 $ cd ..
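
The point of this section is that the repository-local `[format] maxchainlen = 9001` wins over the global value of 1 when `re-delta-all` recomputes the chains. The same override can also be supplied per invocation through hg's global `--config` flag; a sketch under that assumption, where `file` is the tracked file created above and the helper name is illustrative:

    import subprocess

    def redelta_all_with_chainlen(repo_path, maxchainlen):
        # Recompute every delta under an explicit chain-length limit, then
        # return the delta-chain table for inspection.
        subprocess.run(
            ["hg", "-R", repo_path,
             "--config", "format.maxchainlen=%d" % maxchainlen,
             "debugupgraderepo", "--run", "--optimize", "redeltaall"],
            check=True, capture_output=True,
        )
        chains = subprocess.run(
            ["hg", "-R", repo_path, "debugdeltachain", "file"],
            capture_output=True, text=True, check=True,
        )
        return chains.stdout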
1285
1171
1286 $ cat << EOF >> $HGRCPATH
1172 $ cat << EOF >> $HGRCPATH
1287 > [format]
1173 > [format]
1288 > maxchainlen = 9001
1174 > maxchainlen = 9001
1289 > EOF
1175 > EOF
1290
1176
1291 Check upgrading a sparse-revlog repository
1177 Check upgrading a sparse-revlog repository
1292 ---------------------------------------
1178 ---------------------------------------
1293
1179
1294 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1180 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1295 $ cd sparserevlogrepo
1181 $ cd sparserevlogrepo
1296 $ touch foo
1182 $ touch foo
1297 $ hg add foo
1183 $ hg add foo
1298 $ hg -q commit -m "foo"
1184 $ hg -q commit -m "foo"
1299 $ cat .hg/requires
1185 $ cat .hg/requires
1300 dotencode
1186 dotencode
1301 fncache
1187 fncache
1302 generaldelta
1188 generaldelta
1303 revlogv1
1189 revlogv1
1304 store
1190 store
1305
1191
1306 Check that we can add the sparse-revlog format requirement
1192 Check that we can add the sparse-revlog format requirement
1307 $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
1193 $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
1308 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1194 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1309 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1195 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1310 $ cat .hg/requires
1196 $ cat .hg/requires
1311 dotencode
1197 dotencode
1312 fncache
1198 fncache
1313 generaldelta
1199 generaldelta
1314 revlogv1
1200 revlogv1
1315 sparserevlog
1201 sparserevlog
1316 store
1202 store
1317
1203
1318 Check that we can remove the sparse-revlog format requirement
1204 Check that we can remove the sparse-revlog format requirement
1319 $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
1205 $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
1320 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1206 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1321 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1207 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1322 $ cat .hg/requires
1208 $ cat .hg/requires
1323 dotencode
1209 dotencode
1324 fncache
1210 fncache
1325 generaldelta
1211 generaldelta
1326 revlogv1
1212 revlogv1
1327 store
1213 store
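
Adding or removing the sparserevlog requirement is thus just a matter of re-running the upgrade under the matching `format.sparse-revlog` setting, exactly as the two runs above do. A compact sketch of that toggle (the function name is illustrative):

    import subprocess

    def toggle_sparse_revlog(repo_path, enabled):
        # Re-run the upgrade with the requested setting and report whether
        # the sparserevlog requirement is now listed in .hg/requires.
        value = "yes" if enabled else "no"
        subprocess.run(
            ["hg", "-R", repo_path,
             "--config", "format.sparse-revlog=" + value,
             "debugupgraderepo", "--run"],
            check=True, capture_output=True,
        )
        with open(repo_path + "/.hg/requires") as fh:
            requires = {line.strip() for line in fh}
        return ("sparserevlog" in requires) == enabled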
1328
1214
1329 #if zstd
1215 #if zstd
1330
1216
1331 Check upgrading to a zstd revlog
1217 Check upgrading to a zstd revlog
1332 --------------------------------
1218 --------------------------------
1333
1219
1334 upgrade
1220 upgrade
1335
1221
1336 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup >/dev/null
1222 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup >/dev/null
1337 $ hg debugformat -v
1223 $ hg debugformat -v
1338 format-variant repo config default
1224 format-variant repo config default
1339 fncache: yes yes yes
1225 fncache: yes yes yes
1340 dotencode: yes yes yes
1226 dotencode: yes yes yes
1341 generaldelta: yes yes yes
1227 generaldelta: yes yes yes
1342 sparserevlog: yes yes yes
1228 sparserevlog: yes yes yes
1343 sidedata: no no no
1229 sidedata: no no no
1344 copies-sdc: no no no
1230 copies-sdc: no no no
1345 plain-cl-delta: yes yes yes
1231 plain-cl-delta: yes yes yes
1346 compression: zstd zlib zlib
1232 compression: zstd zlib zlib
1347 compression-level: default default default
1233 compression-level: default default default
1348 $ cat .hg/requires
1234 $ cat .hg/requires
1349 dotencode
1235 dotencode
1350 fncache
1236 fncache
1351 generaldelta
1237 generaldelta
1352 revlog-compression-zstd
1238 revlog-compression-zstd
1353 revlogv1
1239 revlogv1
1354 sparserevlog
1240 sparserevlog
1355 store
1241 store
1356
1242
1357 downgrade
1243 downgrade
1358
1244
1359 $ hg debugupgraderepo --run --no-backup > /dev/null
1245 $ hg debugupgraderepo --run --no-backup > /dev/null
1360 $ hg debugformat -v
1246 $ hg debugformat -v
1361 format-variant repo config default
1247 format-variant repo config default
1362 fncache: yes yes yes
1248 fncache: yes yes yes
1363 dotencode: yes yes yes
1249 dotencode: yes yes yes
1364 generaldelta: yes yes yes
1250 generaldelta: yes yes yes
1365 sparserevlog: yes yes yes
1251 sparserevlog: yes yes yes
1366 sidedata: no no no
1252 sidedata: no no no
1367 copies-sdc: no no no
1253 copies-sdc: no no no
1368 plain-cl-delta: yes yes yes
1254 plain-cl-delta: yes yes yes
1369 compression: zlib zlib zlib
1255 compression: zlib zlib zlib
1370 compression-level: default default default
1256 compression-level: default default default
1371 $ cat .hg/requires
1257 $ cat .hg/requires
1372 dotencode
1258 dotencode
1373 fncache
1259 fncache
1374 generaldelta
1260 generaldelta
1375 revlogv1
1261 revlogv1
1376 sparserevlog
1262 sparserevlog
1377 store
1263 store
1378
1264
1379 upgrade from hgrc
1265 upgrade from hgrc
1380
1266
1381 $ cat >> .hg/hgrc << EOF
1267 $ cat >> .hg/hgrc << EOF
1382 > [format]
1268 > [format]
1383 > revlog-compression=zstd
1269 > revlog-compression=zstd
1384 > EOF
1270 > EOF
1385 $ hg debugupgraderepo --run --no-backup > /dev/null
1271 $ hg debugupgraderepo --run --no-backup > /dev/null
1386 $ hg debugformat -v
1272 $ hg debugformat -v
1387 format-variant repo config default
1273 format-variant repo config default
1388 fncache: yes yes yes
1274 fncache: yes yes yes
1389 dotencode: yes yes yes
1275 dotencode: yes yes yes
1390 generaldelta: yes yes yes
1276 generaldelta: yes yes yes
1391 sparserevlog: yes yes yes
1277 sparserevlog: yes yes yes
1392 sidedata: no no no
1278 sidedata: no no no
1393 copies-sdc: no no no
1279 copies-sdc: no no no
1394 plain-cl-delta: yes yes yes
1280 plain-cl-delta: yes yes yes
1395 compression: zstd zstd zlib
1281 compression: zstd zstd zlib
1396 compression-level: default default default
1282 compression-level: default default default
1397 $ cat .hg/requires
1283 $ cat .hg/requires
1398 dotencode
1284 dotencode
1399 fncache
1285 fncache
1400 generaldelta
1286 generaldelta
1401 revlog-compression-zstd
1287 revlog-compression-zstd
1402 revlogv1
1288 revlogv1
1403 sparserevlog
1289 sparserevlog
1404 store
1290 store
1405
1291
1406 #endif
1292 #endif
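
For the compression rounds in the block above, the decisive signals are the `compression:` row of `hg debugformat -v` and whether `revlog-compression-zstd` appears in the requirements. A sketch that switches engines and returns the format table, assuming a zstd-capable build as the `#if zstd` guard does:

    import subprocess

    def switch_compression(repo_path, engine="zstd"):
        # Rewrite the store with the requested compression engine, skipping
        # the backup as the test does, then return the debugformat table.
        subprocess.run(
            ["hg", "-R", repo_path,
             "--config", "format.revlog-compression=" + engine,
             "debugupgraderepo", "--run", "--no-backup"],
            check=True, capture_output=True,
        )
        fmt = subprocess.run(
            ["hg", "-R", repo_path, "debugformat", "-v"],
            capture_output=True, text=True, check=True,
        )
        return fmt.stdout

Passing "zlib" instead of "zstd" gives the downgrade path shown above.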
1407
1293
1408 Check upgrading to a side-data revlog
1294 Check upgrading to a side-data revlog
1409 -------------------------------------
1295 -------------------------------------
1410
1296
1411 upgrade
1297 upgrade
1412
1298
1413 $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" >/dev/null
1299 $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" >/dev/null
1414 $ hg debugformat -v
1300 $ hg debugformat -v
1415 format-variant repo config default
1301 format-variant repo config default
1416 fncache: yes yes yes
1302 fncache: yes yes yes
1417 dotencode: yes yes yes
1303 dotencode: yes yes yes
1418 generaldelta: yes yes yes
1304 generaldelta: yes yes yes
1419 sparserevlog: yes yes yes
1305 sparserevlog: yes yes yes
1420 sidedata: yes no no
1306 sidedata: yes no no
1421 copies-sdc: no no no
1307 copies-sdc: no no no
1422 plain-cl-delta: yes yes yes
1308 plain-cl-delta: yes yes yes
1423 compression: zstd zstd zlib (zstd !)
1309 compression: zstd zstd zlib (zstd !)
1424 compression: zlib zlib zlib (no-zstd !)
1310 compression: zlib zlib zlib (no-zstd !)
1425 compression-level: default default default
1311 compression-level: default default default
1426 $ cat .hg/requires
1312 $ cat .hg/requires
1427 dotencode
1313 dotencode
1428 exp-sidedata-flag
1314 exp-sidedata-flag
1429 fncache
1315 fncache
1430 generaldelta
1316 generaldelta
1431 revlog-compression-zstd (zstd !)
1317 revlog-compression-zstd (zstd !)
1432 revlogv1
1318 revlogv1
1433 sparserevlog
1319 sparserevlog
1434 store
1320 store
1435 $ hg debugsidedata -c 0
1321 $ hg debugsidedata -c 0
1436 2 sidedata entries
1322 2 sidedata entries
1437 entry-0001 size 4
1323 entry-0001 size 4
1438 entry-0002 size 32
1324 entry-0002 size 32
1439
1325
1440 downgrade
1326 downgrade
1441
1327
1442 $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup > /dev/null
1328 $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup > /dev/null
1443 $ hg debugformat -v
1329 $ hg debugformat -v
1444 format-variant repo config default
1330 format-variant repo config default
1445 fncache: yes yes yes
1331 fncache: yes yes yes
1446 dotencode: yes yes yes
1332 dotencode: yes yes yes
1447 generaldelta: yes yes yes
1333 generaldelta: yes yes yes
1448 sparserevlog: yes yes yes
1334 sparserevlog: yes yes yes
1449 sidedata: no no no
1335 sidedata: no no no
1450 copies-sdc: no no no
1336 copies-sdc: no no no
1451 plain-cl-delta: yes yes yes
1337 plain-cl-delta: yes yes yes
1452 compression: zstd zstd zlib (zstd !)
1338 compression: zstd zstd zlib (zstd !)
1453 compression: zlib zlib zlib (no-zstd !)
1339 compression: zlib zlib zlib (no-zstd !)
1454 compression-level: default default default
1340 compression-level: default default default
1455 $ cat .hg/requires
1341 $ cat .hg/requires
1456 dotencode
1342 dotencode
1457 fncache
1343 fncache
1458 generaldelta
1344 generaldelta
1459 revlog-compression-zstd (zstd !)
1345 revlog-compression-zstd (zstd !)
1460 revlogv1
1346 revlogv1
1461 sparserevlog
1347 sparserevlog
1462 store
1348 store
1463 $ hg debugsidedata -c 0
1349 $ hg debugsidedata -c 0
1464
1350
1465 upgrade from hgrc
1351 upgrade from hgrc
1466
1352
1467 $ cat >> .hg/hgrc << EOF
1353 $ cat >> .hg/hgrc << EOF
1468 > [format]
1354 > [format]
1469 > exp-use-side-data=yes
1355 > exp-use-side-data=yes
1470 > EOF
1356 > EOF
1471 $ hg debugupgraderepo --run --no-backup > /dev/null
1357 $ hg debugupgraderepo --run --no-backup > /dev/null
1472 $ hg debugformat -v
1358 $ hg debugformat -v
1473 format-variant repo config default
1359 format-variant repo config default
1474 fncache: yes yes yes
1360 fncache: yes yes yes
1475 dotencode: yes yes yes
1361 dotencode: yes yes yes
1476 generaldelta: yes yes yes
1362 generaldelta: yes yes yes
1477 sparserevlog: yes yes yes
1363 sparserevlog: yes yes yes
1478 sidedata: yes yes no
1364 sidedata: yes yes no
1479 copies-sdc: no no no
1365 copies-sdc: no no no
1480 plain-cl-delta: yes yes yes
1366 plain-cl-delta: yes yes yes
1481 compression: zstd zstd zlib (zstd !)
1367 compression: zstd zstd zlib (zstd !)
1482 compression: zlib zlib zlib (no-zstd !)
1368 compression: zlib zlib zlib (no-zstd !)
1483 compression-level: default default default
1369 compression-level: default default default
1484 $ cat .hg/requires
1370 $ cat .hg/requires
1485 dotencode
1371 dotencode
1486 exp-sidedata-flag
1372 exp-sidedata-flag
1487 fncache
1373 fncache
1488 generaldelta
1374 generaldelta
1489 revlog-compression-zstd (zstd !)
1375 revlog-compression-zstd (zstd !)
1490 revlogv1
1376 revlogv1
1491 sparserevlog
1377 sparserevlog
1492 store
1378 store
1493 $ hg debugsidedata -c 0
1379 $ hg debugsidedata -c 0
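
The side-data round trips follow the same pattern: toggle the experimental `format.exp-use-side-data` option, run the upgrade, and inspect a changeset with `hg debugsidedata -c 0` (an empty output means no side-data is stored). A last hedged sketch of that inspection step; the extension the test uses to actually generate side-data is not wired in here:

    import subprocess

    def sidedata_entries(repo_path, rev="0"):
        # Returns the raw `hg debugsidedata -c REV` output; an empty string
        # means the changeset carries no side-data.
        result = subprocess.run(
            ["hg", "-R", repo_path, "debugsidedata", "-c", rev],
            capture_output=True, text=True, check=True,
        )
        return result.stdout.strip()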