##// END OF EJS Templates
upgrade: support running upgrade if repository has share-safe requirement...
Pulkit Goyal -
r46059:78f0bb37 default
parent child Browse files
Show More
@@ -1,1432 +1,1433 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from .pycompat import getattr
13 from .pycompat import getattr
14 from . import (
14 from . import (
15 changelog,
15 changelog,
16 error,
16 error,
17 filelog,
17 filelog,
18 hg,
18 hg,
19 localrepo,
19 localrepo,
20 manifest,
20 manifest,
21 metadata,
21 metadata,
22 pycompat,
22 pycompat,
23 requirements,
23 requirements,
24 revlog,
24 revlog,
25 scmutil,
25 scmutil,
26 util,
26 util,
27 vfs as vfsmod,
27 vfs as vfsmod,
28 )
28 )
29
29
30 from .utils import compression
30 from .utils import compression
31
31
# list of requirements that request a clone of all revlog if added/removed
# (changing these alters the delta strategy, so every revlog must be
# rewritten rather than copied)
RECLONES_REQUIREMENTS = {
    b'generaldelta',
    requirements.SPARSEREVLOG_REQUIREMENT,
}
37
37
38
38
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements date back to Mercurial 0.9.2; repositories
    # predating them are not supported by the in-place upgrade code.
    needed = set()
    needed.add(b'revlogv1')
    needed.add(b'store')
    return needed
51
51
52
52
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        b'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        requirements.SHARED_REQUIREMENT,
    }
    return blockers
69
69
70
70
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
    }
    # Compression requirements can be dropped for any engine that is
    # usable on this system and stamps revlog headers.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
91
91
92
92
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        b'revlogv1',
        b'store',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # Any compression engine usable on this system that stamps revlog
    # headers is an acceptable destination format.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
119
120
120
121
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
    }
    # A compression requirement may be newly added for any engine that is
    # usable here and stamps revlog headers.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
147
148
148
149
def preservedrequirements(repo):
    """Obtain requirements carried over unchanged during an upgrade.

    Empty by default; extensions may monkeypatch this.
    """
    preserved = set()
    return preserved
151
152
152
153
# Improvement categories: a ``deficiency`` is a problem with the current
# repository format; an ``optimisation`` is an optional action that can
# further improve the repository.  Note the variable uses British spelling
# while its value uses American spelling — kept for compatibility.
deficiency = b'deficiency'
optimisation = b'optimization'
155
156
156
157
class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.
    """

    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __eq__(self, other):
        # Equality (and hashing, below) is based on ``name`` alone.
        if isinstance(other, improvement):
            return self.name == other.name
        # Defer to the other operand, as Python recommends.
        return NotImplemented

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.name)
199
200
200
201
# Registry of all formatvariant subclasses, in definition order; populated
# by the @registerformatvariant decorator below.
allformatvariant = []


def registerformatvariant(cls):
    """Class decorator adding ``cls`` to ``allformatvariant``."""
    allformatvariant.append(cls)
    return cls
207
208
208
209
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""

    # Format variants are always reported as deficiencies when absent.
    type = deficiency
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it ``deficiency`` types, should be worded
    # in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        # formatvariant classes are used as singletons and never instantiated.
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
243
244
244
245
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that control this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # Requirements a brand-new repository would get with this config;
        # used as the reference for fromconfig().
        return localrepo.newreporequirements(
            ui, localrepo.defaultcreateopts(ui)
        )

    @classmethod
    def fromrepo(cls, repo):
        """True if the repository currently has the controlling requirement."""
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        """True if the current config would enable the controlling requirement."""
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
270
271
271
272
@registerformatvariant
class fncache(requirementformatvariant):
    # Tracks the 'fncache' store requirement (filename hash cache).
    name = b'fncache'

    _requirement = b'fncache'

    # Enabled by default for new repositories.
    default = True

    description = _(
        b'long and reserved filenames may not work correctly; '
        b'repository performance is sub-optimal'
    )

    upgrademessage = _(
        b'repository will be more resilient to storing '
        b'certain paths and performance of certain '
        b'operations should be improved'
    )
290
291
291
292
@registerformatvariant
class dotencode(requirementformatvariant):
    # Tracks the 'dotencode' store requirement (encoding of leading
    # period/space in filenames).
    name = b'dotencode'

    _requirement = b'dotencode'

    # Enabled by default for new repositories.
    default = True

    description = _(
        b'storage of filenames beginning with a period or '
        b'space may not work correctly'
    )

    upgrademessage = _(
        b'repository will be better able to store files '
        b'beginning with a space or period'
    )
309
310
310
311
@registerformatvariant
class generaldelta(requirementformatvariant):
    # Tracks the 'generaldelta' requirement (deltas against arbitrary bases).
    name = b'generaldelta'

    _requirement = b'generaldelta'

    # Enabled by default for new repositories.
    default = True

    description = _(
        b'deltas within internal storage are unable to '
        b'choose optimal revisions; repository is larger and '
        b'slower than it could be; interaction with other '
        b'repositories may require extra network and CPU '
        b'resources, making "hg push" and "hg pull" slower'
    )

    upgrademessage = _(
        b'repository storage will be able to create '
        b'optimal deltas; new repository data will be '
        b'smaller and read times should decrease; '
        b'interacting with other repositories using this '
        b'storage model should require less network and '
        b'CPU resources, making "hg push" and "hg pull" '
        b'faster'
    )
336
337
337
338
@registerformatvariant
class sparserevlog(requirementformatvariant):
    # Tracks the sparse-revlog requirement (delta chains may skip gaps).
    name = b'sparserevlog'

    _requirement = requirements.SPARSEREVLOG_REQUIREMENT

    # Enabled by default for new repositories.
    default = True

    description = _(
        b'in order to limit disk reading and memory usage on older '
        b'version, the span of a delta chain from its root to its '
        b'end is limited, whatever the relevant data in this span. '
        b'This can severly limit Mercurial ability to build good '
        b'chain of delta resulting is much more storage space being '
        b'taken and limit reusability of on disk delta during '
        b'exchange.'
    )

    upgrademessage = _(
        b'Revlog supports delta chain with more unused data '
        b'between payload. These gaps will be skipped at read '
        b'time. This allows for better delta chains, making a '
        b'better compression and faster exchange with server.'
    )
362
363
363
364
@registerformatvariant
class sidedata(requirementformatvariant):
    # Tracks the experimental side-data requirement (extra per-revision data).
    name = b'sidedata'

    _requirement = requirements.SIDEDATA_REQUIREMENT

    # Not enabled by default for new repositories.
    default = False

    description = _(
        b'Allows storage of extra data alongside a revision, '
        b'unlocking various caching options.'
    )

    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
378
379
379
380
@registerformatvariant
class persistentnodemap(requirementformatvariant):
    # Tracks the persistent nodemap requirement (on-disk node->rev index).
    name = b'persistent-nodemap'

    _requirement = requirements.NODEMAP_REQUIREMENT

    # Not enabled by default for new repositories.
    default = False

    description = _(
        b'persist the node -> rev mapping on disk to speedup lookup'
    )

    upgrademessage = _(b'Speedup revision lookup by node id.')
393
394
394
395
@registerformatvariant
class copiessdc(requirementformatvariant):
    # Tracks the copies-in-side-data requirement (copy info in changesets).
    name = b'copies-sdc'

    _requirement = requirements.COPIESSDC_REQUIREMENT

    # Not enabled by default for new repositories.
    default = False

    description = _(b'Stores copies information alongside changesets.')

    upgrademessage = _(
        b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
    )
408
409
409
410
@registerformatvariant
class removecldeltachain(formatvariant):
    """Detects changelogs still stored with delta chains (pre-4.0 style)."""

    name = b'plain-cl-delta'

    default = True

    description = _(
        b'changelog storage is using deltas instead of '
        b'raw entries; changelog reading and any '
        b'operation relying on changelog data are slower '
        b'than they could be'
    )

    upgrademessage = _(
        b'changelog storage will be reformated to '
        b'store raw entries; changelog reading will be '
        b'faster; changelog size may be reduced'
    )

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains.  Report
        # False as soon as any changelog revision is stored as a delta
        # (i.e. its chain base is not itself).
        cl = repo.changelog
        getbase = cl.chainbase
        for rev in cl:
            if getbase(rev) != rev:
                return False
        return True

    @staticmethod
    def fromconfig(repo):
        # Plain (non-delta) changelog entries are always the configured
        # behavior on modern Mercurial.
        return True
440
441
441
442
@registerformatvariant
class compressionengine(formatvariant):
    """Reports which compression engine the repository/config uses."""

    name = b'compression'
    default = b'zlib'

    description = _(
        b'Compresion algorithm used to compress data. '
        b'Some engine are faster than other'
    )

    upgrademessage = _(
        b'revlog content will be recompressed with the new algorithm.'
    )

    @classmethod
    def fromrepo(cls, repo):
        # Multiple compression engine requirements may co-exist because,
        # strictly speaking, revlogs support mixed compression styles.
        # New entries are written with "the last one", so report that.
        current = b'zlib'
        for req in repo.requirements:
            if req.startswith(b'revlog-compression-') or req.startswith(
                b'exp-compression-'
            ):
                current = req.split(b'-', 2)[2]
        return current

    @classmethod
    def fromconfig(cls, repo):
        configured = repo.ui.configlist(b'format', b'revlog-compression')
        # Mirror the engine selection code: the first valid engine wins.
        for candidate in configured:
            if candidate in util.compengines:
                return candidate

        # No valid compression engine found; display the whole configured
        # list for clarity.
        return b','.join(configured)
479
480
480
481
@registerformatvariant
class compressionlevel(formatvariant):
    """Reports the configured compression level for the active engine."""

    name = b'compression-level'
    default = b'default'

    description = _(b'compression level')

    upgrademessage = _(b'revlog content will be recompressed')

    @classmethod
    def _formatlevel(cls, comp, ui):
        """Return the configured level for engine ``comp`` as bytes.

        Returns ``b'default'`` when no explicit level is configured or the
        engine has no level setting.
        """
        level = None
        if comp == b'zlib':
            level = ui.configint(b'storage', b'revlog.zlib.level')
        elif comp == b'zstd':
            level = ui.configint(b'storage', b'revlog.zstd.level')
        if level is None:
            return b'default'
        # Previously ``bytes(level)`` — on Python 3 that yields a
        # NUL-filled buffer of length ``level`` rather than the decimal
        # string, garbling the debugformat output.  Format explicitly.
        return b'%d' % level

    @classmethod
    def fromrepo(cls, repo):
        # Level currently in effect for the engine the repo actually uses.
        return cls._formatlevel(compressionengine.fromrepo(repo), repo.ui)

    @classmethod
    def fromconfig(cls, repo):
        # Level that would be used by a freshly configured repository.
        return cls._formatlevel(compressionengine.fromconfig(repo), repo.ui)
513
514
514
515
def finddeficiencies(repo):
    """returns a list of deficiencies that the repo suffer from"""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
528
529
529
530
# Maps legacy optimization names (without '-') to their modern dashed
# forms, so older scripts keep working on newer clients.
#
# We don't enforce backward compatibility for debug command so this
# might eventually be dropped. However, having to use two different
# forms in script when comparing result is annoying enough to add
# backward compatibility for a while.
legacy_opts_map = {
    b'redeltaparent': b're-delta-parent',
    b'redeltamultibase': b're-delta-multibase',
    b'redeltaall': b're-delta-all',
    b'redeltafulladd': b're-delta-fulladd',
}
542
543
543
544
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade"""
    # Every known optimization is listed unconditionally; logic elsewhere
    # decides which of them actually apply to a given upgrade run.
    #
    # Each spec is (name, description, upgrademessage); they are expanded
    # into ``improvement`` instances below.
    specs = [
        (
            b're-delta-parent',
            _(
                b'deltas within internal storage will be recalculated to '
                b'choose an optimal base revision where this was not '
                b'already done; the size of the repository may shrink and '
                b'various operations may become faster; the first time '
                b'this optimization is performed could slow down upgrade '
                b'execution considerably; subsequent invocations should '
                b'not run noticeably slower'
            ),
            _(
                b'deltas within internal storage will choose a new '
                b'base revision if needed'
            ),
        ),
        (
            b're-delta-multibase',
            _(
                b'deltas within internal storage will be recalculated '
                b'against multiple base revision and the smallest '
                b'difference will be used; the size of the repository may '
                b'shrink significantly when there are many merges; this '
                b'optimization will slow down execution in proportion to '
                b'the number of merges in the repository and the amount '
                b'of files in the repository; this slow down should not '
                b'be significant unless there are tens of thousands of '
                b'files and thousands of merges'
            ),
            _(
                b'deltas within internal storage will choose an '
                b'optimal delta by computing deltas against multiple '
                b'parents; may slow down execution time '
                b'significantly'
            ),
        ),
        (
            b're-delta-all',
            _(
                b'deltas within internal storage will always be '
                b'recalculated without reusing prior deltas; this will '
                b'likely make execution run several times slower; this '
                b'optimization is typically not needed'
            ),
            _(
                b'deltas within internal storage will be fully '
                b'recomputed; this will likely drastically slow down '
                b'execution time'
            ),
        ),
        (
            b're-delta-fulladd',
            _(
                b'every revision will be re-added as if it was new '
                b'content. It will go through the full storage '
                b'mechanism giving extensions a chance to process it '
                b'(eg. lfs). This is similar to "re-delta-all" but even '
                b'slower since more logic is involved.'
            ),
            _(
                b'each revision will be added as new content to the '
                b'internal storage; this will likely drastically slow '
                b'down execution time, but some extensions might need '
                b'it'
            ),
        ),
    ]
    return [
        improvement(
            name=name,
            type=optimisation,
            description=description,
            upgrademessage=upgrademessage,
        )
        for name, description, upgrademessage in specs
    ]
633
634
634
635
def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    # A deficiency tied to a requirement that is absent from the destination
    # repository cannot be acted upon, so it is pruned; deficiencies without
    # an associated requirement always survive.
    newactions = [
        d
        for d in deficiencies
        if d._requirement is None or d._requirement in destreqs
    ]

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
664
665
665
666
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    # The changelog lives at a single well-known path.
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)

    # Manifest revlogs (root and tree) share a common suffix; whatever
    # precedes it names the tree directory (empty for the root manifest).
    if path.endswith(b'00manifest.i'):
        tree = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)

    # Everything else is a filelog: drop the "data/" prefix and the ".i"
    # suffix to recover the tracked file's name
    # (reverse of "/".join(("data", path + ".i"))).
    return filelog.filelog(repo.svfs, path[5:-2])
679 return filelog.filelog(repo.svfs, path[5:-2])
679
680
680
681
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # Unwrap any filelog-style wrapper down to the raw revlog on both sides.
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    srcvfs = oldrl.opener
    dstvfs = newrl.opener
    srcindex = srcvfs.join(oldrl.indexfile)
    dstindex = dstvfs.join(newrl.indexfile)
    srcdata = srcvfs.join(oldrl.datafile)
    dstdata = dstvfs.join(newrl.datafile)

    # Opening the destination index for write forces creation of all the
    # intermediate directories before the raw copies below.
    with dstvfs(newrl.indexfile, b'w'):
        pass

    util.copyfile(srcindex, dstindex)
    hasdata = srcvfs.exists(oldrl.datafile)
    if hasdata:
        util.copyfile(srcdata, dstdata)

    # The changelog and manifest revlogs are excluded from the fncache;
    # any other revlog is registered there (index, plus the data file
    # when one exists).
    if not unencodedname.endswith((b'00changelog.i', b'00manifest.i')):
        destrepo.svfs.fncache.add(unencodedname)
        if hasdata:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
714 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
714
715
715
716
# Sentinels identifying the revlog categories an upgrade may process.
UPGRADE_CHANGELOG = object()
UPGRADE_MANIFEST = object()
UPGRADE_FILELOG = object()

# Convenience filter selecting every revlog category.
UPGRADE_ALL_REVLOGS = frozenset(
    {UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG}
)
723
724
724
725
def getsidedatacompanion(srcrepo, dstrepo):
    """Return a side-data companion callable for the clone, or None.

    The companion depends on which side-data related requirements are being
    added to or removed from the repository by the upgrade.
    """
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements

    if requirements.SIDEDATA_REQUIREMENT in removedreqs:
        # Side-data support is being dropped: strip side-data from any
        # revision whose flags say it carries some.
        def sidedatacompanion(rl, rev):
            rl = getattr(rl, '_revlog', rl)
            dropit = bool(rl.flags(rev) & revlog.REVIDX_SIDEDATA)
            return dropit, (), {}

        return sidedatacompanion

    if requirements.COPIESSDC_REQUIREMENT in addedreqs:
        return metadata.getsidedataadder(srcrepo, dstrepo)
    if requirements.COPIESSDC_REQUIREMENT in removedreqs:
        return metadata.getsidedataremover(srcrepo, dstrepo)
    return None
742
743
743
744
def matchrevlog(revlogfilter, entry):
    """Check whether a revlog is selected for cloning.

    The store entry is classified by its path suffix, then checked against
    the passed filter."""
    if entry.endswith(b'00changelog.i'):
        category = UPGRADE_CHANGELOG
    elif entry.endswith(b'00manifest.i'):
        category = UPGRADE_MANIFEST
    else:
        category = UPGRADE_FILELOG
    return category in revlogfilter
753
754
754
755
def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    deltareuse,
    forcedeltabothparents,
    revlogs=UPGRADE_ALL_REVLOGS,
):
    """Copy revlogs between 2 repos.

    ``revlogs`` selects which categories (changelog, manifests, filelogs)
    are cloned through ``revlog.clone()``; unselected revlogs are copied
    file-by-file without being rewritten.  ``deltareuse`` and
    ``forcedeltabothparents`` are forwarded to ``clone()`` and control how
    deltas are recomputed.  Progress and size statistics are reported on
    ``ui``.

    Raises ``error.ProgrammingError`` if a store entry maps to an unknown
    revlog type.
    """
    # Aggregate counters (all revlogs), then per-category counters for
    # filelogs (f*), manifests (m*) and the changelog (c*).
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # Fix: the exception was previously constructed but never
            # raised, silently ignoring unknown revlog types.
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)

        # The store walk yields filelogs, then manifests, then the
        # changelog; each category transition flushes the previous
        # category's summary and starts a fresh progress bar.
        if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
            ui.status(
                _(
                    b'finished migrating %d manifest revisions across %d '
                    b'manifests; change in size: %s\n'
                )
                % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
            )

            ui.status(
                _(
                    b'migrating changelog containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    crevcount,
                    util.bytecount(csrcsize),
                    util.bytecount(crawsize),
                )
            )
            seen.add(b'c')
            progress = srcrepo.ui.makeprogress(
                _(b'changelog revisions'), total=crevcount
            )
        elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
            ui.status(
                _(
                    b'finished migrating %d filelog revisions across %d '
                    b'filelogs; change in size: %s\n'
                )
                % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
            )

            ui.status(
                _(
                    b'migrating %d manifests containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    mcount,
                    mrevcount,
                    util.bytecount(msrcsize),
                    util.bytecount(mrawsize),
                )
            )
            seen.add(b'm')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'manifest revisions'), total=mrevcount
            )
        elif b'f' not in seen:
            ui.status(
                _(
                    b'migrating %d filelogs containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    fcount,
                    frevcount,
                    util.bytecount(fsrcsize),
                    util.bytecount(frawsize),
                )
            )
            seen.add(b'f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'file revisions'), total=frevcount
            )

        if matchrevlog(revlogs, unencoded):
            # Selected for upgrade: rewrite through revlog.clone().
            ui.note(
                _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
            )
            newrl = _revlogfrompath(dstrepo, unencoded)
            oldrl.clone(
                tr,
                newrl,
                addrevisioncb=oncopiedrevision,
                deltareuse=deltareuse,
                forcedeltabothparents=forcedeltabothparents,
                sidedatacompanion=sidedatacompanion,
            )
        else:
            # Not selected: copy the on-disk files verbatim.
            msg = _(b'blindly copying %s containing %i revisions\n')
            ui.note(msg % (unencoded, len(oldrl)))
            _copyrevlog(tr, dstrepo, oldrl, unencoded)

            newrl = _revlogfrompath(dstrepo, unencoded)

        info = newrl.storageinfo(storedsize=True)
        datasize = info[b'storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )
978
979
979
980
980 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
981 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
981 """Determine whether to copy a store file during upgrade.
982 """Determine whether to copy a store file during upgrade.
982
983
983 This function is called when migrating store files from ``srcrepo`` to
984 This function is called when migrating store files from ``srcrepo`` to
984 ``dstrepo`` as part of upgrading a repository.
985 ``dstrepo`` as part of upgrading a repository.
985
986
986 Args:
987 Args:
987 srcrepo: repo we are copying from
988 srcrepo: repo we are copying from
988 dstrepo: repo we are copying to
989 dstrepo: repo we are copying to
989 requirements: set of requirements for ``dstrepo``
990 requirements: set of requirements for ``dstrepo``
990 path: store file being examined
991 path: store file being examined
991 mode: the ``ST_MODE`` file type of ``path``
992 mode: the ``ST_MODE`` file type of ``path``
992 st: ``stat`` data structure for ``path``
993 st: ``stat`` data structure for ``path``
993
994
994 Function should return ``True`` if the file is to be copied.
995 Function should return ``True`` if the file is to be copied.
995 """
996 """
996 # Skip revlogs.
997 # Skip revlogs.
997 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
998 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
998 return False
999 return False
999 # Skip transaction related files.
1000 # Skip transaction related files.
1000 if path.startswith(b'undo'):
1001 if path.startswith(b'undo'):
1001 return False
1002 return False
1002 # Only copy regular files.
1003 # Only copy regular files.
1003 if mode != stat.S_IFREG:
1004 if mode != stat.S_IFREG:
1004 return False
1005 return False
1005 # Skip other skipped files.
1006 # Skip other skipped files.
1006 if path in (b'lock', b'fncache'):
1007 if path in (b'lock', b'fncache'):
1007 return False
1008 return False
1008
1009
1009 return True
1010 return True
1010
1011
1011
1012
1012 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
1013 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
1013 """Hook point for extensions to perform additional actions during upgrade.
1014 """Hook point for extensions to perform additional actions during upgrade.
1014
1015
1015 This function is called after revlogs and store files have been copied but
1016 This function is called after revlogs and store files have been copied but
1016 before the new store is swapped into the original location.
1017 before the new store is swapped into the original location.
1017 """
1018 """
1018
1019
1019
1020
1020 def _upgraderepo(
1021 def _upgraderepo(
1021 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1022 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1022 ):
1023 ):
1023 """Do the low-level work of upgrading a repository.
1024 """Do the low-level work of upgrading a repository.
1024
1025
1025 The upgrade is effectively performed as a copy between a source
1026 The upgrade is effectively performed as a copy between a source
1026 repository and a temporary destination repository.
1027 repository and a temporary destination repository.
1027
1028
1028 The source repository is unmodified for as long as possible so the
1029 The source repository is unmodified for as long as possible so the
1029 upgrade can abort at any time without causing loss of service for
1030 upgrade can abort at any time without causing loss of service for
1030 readers and without corrupting the source repository.
1031 readers and without corrupting the source repository.
1031 """
1032 """
1032 assert srcrepo.currentwlock()
1033 assert srcrepo.currentwlock()
1033 assert dstrepo.currentwlock()
1034 assert dstrepo.currentwlock()
1034
1035
1035 ui.status(
1036 ui.status(
1036 _(
1037 _(
1037 b'(it is safe to interrupt this process any time before '
1038 b'(it is safe to interrupt this process any time before '
1038 b'data migration completes)\n'
1039 b'data migration completes)\n'
1039 )
1040 )
1040 )
1041 )
1041
1042
1042 if b're-delta-all' in actions:
1043 if b're-delta-all' in actions:
1043 deltareuse = revlog.revlog.DELTAREUSENEVER
1044 deltareuse = revlog.revlog.DELTAREUSENEVER
1044 elif b're-delta-parent' in actions:
1045 elif b're-delta-parent' in actions:
1045 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1046 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1046 elif b're-delta-multibase' in actions:
1047 elif b're-delta-multibase' in actions:
1047 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1048 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1048 elif b're-delta-fulladd' in actions:
1049 elif b're-delta-fulladd' in actions:
1049 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1050 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1050 else:
1051 else:
1051 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1052 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1052
1053
1053 with dstrepo.transaction(b'upgrade') as tr:
1054 with dstrepo.transaction(b'upgrade') as tr:
1054 _clonerevlogs(
1055 _clonerevlogs(
1055 ui,
1056 ui,
1056 srcrepo,
1057 srcrepo,
1057 dstrepo,
1058 dstrepo,
1058 tr,
1059 tr,
1059 deltareuse,
1060 deltareuse,
1060 b're-delta-multibase' in actions,
1061 b're-delta-multibase' in actions,
1061 revlogs=revlogs,
1062 revlogs=revlogs,
1062 )
1063 )
1063
1064
1064 # Now copy other files in the store directory.
1065 # Now copy other files in the store directory.
1065 # The sorted() makes execution deterministic.
1066 # The sorted() makes execution deterministic.
1066 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1067 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1067 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1068 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1068 continue
1069 continue
1069
1070
1070 srcrepo.ui.status(_(b'copying %s\n') % p)
1071 srcrepo.ui.status(_(b'copying %s\n') % p)
1071 src = srcrepo.store.rawvfs.join(p)
1072 src = srcrepo.store.rawvfs.join(p)
1072 dst = dstrepo.store.rawvfs.join(p)
1073 dst = dstrepo.store.rawvfs.join(p)
1073 util.copyfile(src, dst, copystat=True)
1074 util.copyfile(src, dst, copystat=True)
1074
1075
1075 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1076 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1076
1077
1077 ui.status(_(b'data fully migrated to temporary repository\n'))
1078 ui.status(_(b'data fully migrated to temporary repository\n'))
1078
1079
1079 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1080 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1080 backupvfs = vfsmod.vfs(backuppath)
1081 backupvfs = vfsmod.vfs(backuppath)
1081
1082
1082 # Make a backup of requires file first, as it is the first to be modified.
1083 # Make a backup of requires file first, as it is the first to be modified.
1083 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1084 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1084
1085
1085 # We install an arbitrary requirement that clients must not support
1086 # We install an arbitrary requirement that clients must not support
1086 # as a mechanism to lock out new clients during the data swap. This is
1087 # as a mechanism to lock out new clients during the data swap. This is
1087 # better than allowing a client to continue while the repository is in
1088 # better than allowing a client to continue while the repository is in
1088 # an inconsistent state.
1089 # an inconsistent state.
1089 ui.status(
1090 ui.status(
1090 _(
1091 _(
1091 b'marking source repository as being upgraded; clients will be '
1092 b'marking source repository as being upgraded; clients will be '
1092 b'unable to read from repository\n'
1093 b'unable to read from repository\n'
1093 )
1094 )
1094 )
1095 )
1095 scmutil.writereporequirements(
1096 scmutil.writereporequirements(
1096 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
1097 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
1097 )
1098 )
1098
1099
1099 ui.status(_(b'starting in-place swap of repository data\n'))
1100 ui.status(_(b'starting in-place swap of repository data\n'))
1100 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
1101 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
1101
1102
1102 # Now swap in the new store directory. Doing it as a rename should make
1103 # Now swap in the new store directory. Doing it as a rename should make
1103 # the operation nearly instantaneous and atomic (at least in well-behaved
1104 # the operation nearly instantaneous and atomic (at least in well-behaved
1104 # environments).
1105 # environments).
1105 ui.status(_(b'replacing store...\n'))
1106 ui.status(_(b'replacing store...\n'))
1106 tstart = util.timer()
1107 tstart = util.timer()
1107 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1108 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1108 util.rename(dstrepo.spath, srcrepo.spath)
1109 util.rename(dstrepo.spath, srcrepo.spath)
1109 elapsed = util.timer() - tstart
1110 elapsed = util.timer() - tstart
1110 ui.status(
1111 ui.status(
1111 _(
1112 _(
1112 b'store replacement complete; repository was inconsistent for '
1113 b'store replacement complete; repository was inconsistent for '
1113 b'%0.1fs\n'
1114 b'%0.1fs\n'
1114 )
1115 )
1115 % elapsed
1116 % elapsed
1116 )
1117 )
1117
1118
1118 # We first write the requirements file. Any new requirements will lock
1119 # We first write the requirements file. Any new requirements will lock
1119 # out legacy clients.
1120 # out legacy clients.
1120 ui.status(
1121 ui.status(
1121 _(
1122 _(
1122 b'finalizing requirements file and making repository readable '
1123 b'finalizing requirements file and making repository readable '
1123 b'again\n'
1124 b'again\n'
1124 )
1125 )
1125 )
1126 )
1126 scmutil.writereporequirements(srcrepo, requirements)
1127 scmutil.writereporequirements(srcrepo, requirements)
1127
1128
1128 # The lock file from the old store won't be removed because nothing has a
1129 # The lock file from the old store won't be removed because nothing has a
1129 # reference to its new location. So clean it up manually. Alternatively, we
1130 # reference to its new location. So clean it up manually. Alternatively, we
1130 # could update srcrepo.svfs and other variables to point to the new
1131 # could update srcrepo.svfs and other variables to point to the new
1131 # location. This is simpler.
1132 # location. This is simpler.
1132 backupvfs.unlink(b'store/lock')
1133 backupvfs.unlink(b'store/lock')
1133
1134
1134 return backuppath
1135 return backuppath
1135
1136
1136
1137
1137 def upgraderepo(
1138 def upgraderepo(
1138 ui,
1139 ui,
1139 repo,
1140 repo,
1140 run=False,
1141 run=False,
1141 optimize=None,
1142 optimize=None,
1142 backup=True,
1143 backup=True,
1143 manifest=None,
1144 manifest=None,
1144 changelog=None,
1145 changelog=None,
1145 ):
1146 ):
1146 """Upgrade a repository in place."""
1147 """Upgrade a repository in place."""
1147 if optimize is None:
1148 if optimize is None:
1148 optimize = []
1149 optimize = []
1149 optimize = {legacy_opts_map.get(o, o) for o in optimize}
1150 optimize = {legacy_opts_map.get(o, o) for o in optimize}
1150 repo = repo.unfiltered()
1151 repo = repo.unfiltered()
1151
1152
1152 revlogs = set(UPGRADE_ALL_REVLOGS)
1153 revlogs = set(UPGRADE_ALL_REVLOGS)
1153 specentries = ((b'c', changelog), (b'm', manifest))
1154 specentries = ((b'c', changelog), (b'm', manifest))
1154 specified = [(y, x) for (y, x) in specentries if x is not None]
1155 specified = [(y, x) for (y, x) in specentries if x is not None]
1155 if specified:
1156 if specified:
1156 # we have some limitation on revlogs to be recloned
1157 # we have some limitation on revlogs to be recloned
1157 if any(x for y, x in specified):
1158 if any(x for y, x in specified):
1158 revlogs = set()
1159 revlogs = set()
1159 for r, enabled in specified:
1160 for r, enabled in specified:
1160 if enabled:
1161 if enabled:
1161 if r == b'c':
1162 if r == b'c':
1162 revlogs.add(UPGRADE_CHANGELOG)
1163 revlogs.add(UPGRADE_CHANGELOG)
1163 elif r == b'm':
1164 elif r == b'm':
1164 revlogs.add(UPGRADE_MANIFEST)
1165 revlogs.add(UPGRADE_MANIFEST)
1165 else:
1166 else:
1166 # none are enabled
1167 # none are enabled
1167 for r, __ in specified:
1168 for r, __ in specified:
1168 if r == b'c':
1169 if r == b'c':
1169 revlogs.discard(UPGRADE_CHANGELOG)
1170 revlogs.discard(UPGRADE_CHANGELOG)
1170 elif r == b'm':
1171 elif r == b'm':
1171 revlogs.discard(UPGRADE_MANIFEST)
1172 revlogs.discard(UPGRADE_MANIFEST)
1172
1173
1173 # Ensure the repository can be upgraded.
1174 # Ensure the repository can be upgraded.
1174 missingreqs = requiredsourcerequirements(repo) - repo.requirements
1175 missingreqs = requiredsourcerequirements(repo) - repo.requirements
1175 if missingreqs:
1176 if missingreqs:
1176 raise error.Abort(
1177 raise error.Abort(
1177 _(b'cannot upgrade repository; requirement missing: %s')
1178 _(b'cannot upgrade repository; requirement missing: %s')
1178 % _(b', ').join(sorted(missingreqs))
1179 % _(b', ').join(sorted(missingreqs))
1179 )
1180 )
1180
1181
1181 blockedreqs = blocksourcerequirements(repo) & repo.requirements
1182 blockedreqs = blocksourcerequirements(repo) & repo.requirements
1182 if blockedreqs:
1183 if blockedreqs:
1183 raise error.Abort(
1184 raise error.Abort(
1184 _(
1185 _(
1185 b'cannot upgrade repository; unsupported source '
1186 b'cannot upgrade repository; unsupported source '
1186 b'requirement: %s'
1187 b'requirement: %s'
1187 )
1188 )
1188 % _(b', ').join(sorted(blockedreqs))
1189 % _(b', ').join(sorted(blockedreqs))
1189 )
1190 )
1190
1191
1191 # FUTURE there is potentially a need to control the wanted requirements via
1192 # FUTURE there is potentially a need to control the wanted requirements via
1192 # command arguments or via an extension hook point.
1193 # command arguments or via an extension hook point.
1193 newreqs = localrepo.newreporequirements(
1194 newreqs = localrepo.newreporequirements(
1194 repo.ui, localrepo.defaultcreateopts(repo.ui)
1195 repo.ui, localrepo.defaultcreateopts(repo.ui)
1195 )
1196 )
1196 newreqs.update(preservedrequirements(repo))
1197 newreqs.update(preservedrequirements(repo))
1197
1198
1198 noremovereqs = (
1199 noremovereqs = (
1199 repo.requirements - newreqs - supportremovedrequirements(repo)
1200 repo.requirements - newreqs - supportremovedrequirements(repo)
1200 )
1201 )
1201 if noremovereqs:
1202 if noremovereqs:
1202 raise error.Abort(
1203 raise error.Abort(
1203 _(
1204 _(
1204 b'cannot upgrade repository; requirement would be '
1205 b'cannot upgrade repository; requirement would be '
1205 b'removed: %s'
1206 b'removed: %s'
1206 )
1207 )
1207 % _(b', ').join(sorted(noremovereqs))
1208 % _(b', ').join(sorted(noremovereqs))
1208 )
1209 )
1209
1210
1210 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
1211 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
1211 if noaddreqs:
1212 if noaddreqs:
1212 raise error.Abort(
1213 raise error.Abort(
1213 _(
1214 _(
1214 b'cannot upgrade repository; do not support adding '
1215 b'cannot upgrade repository; do not support adding '
1215 b'requirement: %s'
1216 b'requirement: %s'
1216 )
1217 )
1217 % _(b', ').join(sorted(noaddreqs))
1218 % _(b', ').join(sorted(noaddreqs))
1218 )
1219 )
1219
1220
1220 unsupportedreqs = newreqs - supporteddestrequirements(repo)
1221 unsupportedreqs = newreqs - supporteddestrequirements(repo)
1221 if unsupportedreqs:
1222 if unsupportedreqs:
1222 raise error.Abort(
1223 raise error.Abort(
1223 _(
1224 _(
1224 b'cannot upgrade repository; do not support '
1225 b'cannot upgrade repository; do not support '
1225 b'destination requirement: %s'
1226 b'destination requirement: %s'
1226 )
1227 )
1227 % _(b', ').join(sorted(unsupportedreqs))
1228 % _(b', ').join(sorted(unsupportedreqs))
1228 )
1229 )
1229
1230
1230 # Find and validate all improvements that can be made.
1231 # Find and validate all improvements that can be made.
1231 alloptimizations = findoptimizations(repo)
1232 alloptimizations = findoptimizations(repo)
1232
1233
1233 # Apply and Validate arguments.
1234 # Apply and Validate arguments.
1234 optimizations = []
1235 optimizations = []
1235 for o in alloptimizations:
1236 for o in alloptimizations:
1236 if o.name in optimize:
1237 if o.name in optimize:
1237 optimizations.append(o)
1238 optimizations.append(o)
1238 optimize.discard(o.name)
1239 optimize.discard(o.name)
1239
1240
1240 if optimize: # anything left is unknown
1241 if optimize: # anything left is unknown
1241 raise error.Abort(
1242 raise error.Abort(
1242 _(b'unknown optimization action requested: %s')
1243 _(b'unknown optimization action requested: %s')
1243 % b', '.join(sorted(optimize)),
1244 % b', '.join(sorted(optimize)),
1244 hint=_(b'run without arguments to see valid optimizations'),
1245 hint=_(b'run without arguments to see valid optimizations'),
1245 )
1246 )
1246
1247
1247 deficiencies = finddeficiencies(repo)
1248 deficiencies = finddeficiencies(repo)
1248 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
1249 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
1249 actions.extend(
1250 actions.extend(
1250 o
1251 o
1251 for o in sorted(optimizations)
1252 for o in sorted(optimizations)
1252 # determineactions could have added optimisation
1253 # determineactions could have added optimisation
1253 if o not in actions
1254 if o not in actions
1254 )
1255 )
1255
1256
1256 removedreqs = repo.requirements - newreqs
1257 removedreqs = repo.requirements - newreqs
1257 addedreqs = newreqs - repo.requirements
1258 addedreqs = newreqs - repo.requirements
1258
1259
1259 if revlogs != UPGRADE_ALL_REVLOGS:
1260 if revlogs != UPGRADE_ALL_REVLOGS:
1260 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
1261 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
1261 if incompatible:
1262 if incompatible:
1262 msg = _(
1263 msg = _(
1263 b'ignoring revlogs selection flags, format requirements '
1264 b'ignoring revlogs selection flags, format requirements '
1264 b'change: %s\n'
1265 b'change: %s\n'
1265 )
1266 )
1266 ui.warn(msg % b', '.join(sorted(incompatible)))
1267 ui.warn(msg % b', '.join(sorted(incompatible)))
1267 revlogs = UPGRADE_ALL_REVLOGS
1268 revlogs = UPGRADE_ALL_REVLOGS
1268
1269
1269 def write_labeled(l, label):
1270 def write_labeled(l, label):
1270 first = True
1271 first = True
1271 for r in sorted(l):
1272 for r in sorted(l):
1272 if not first:
1273 if not first:
1273 ui.write(b', ')
1274 ui.write(b', ')
1274 ui.write(r, label=label)
1275 ui.write(r, label=label)
1275 first = False
1276 first = False
1276
1277
1277 def printrequirements():
1278 def printrequirements():
1278 ui.write(_(b'requirements\n'))
1279 ui.write(_(b'requirements\n'))
1279 ui.write(_(b' preserved: '))
1280 ui.write(_(b' preserved: '))
1280 write_labeled(
1281 write_labeled(
1281 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
1282 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
1282 )
1283 )
1283 ui.write((b'\n'))
1284 ui.write((b'\n'))
1284 removed = repo.requirements - newreqs
1285 removed = repo.requirements - newreqs
1285 if repo.requirements - newreqs:
1286 if repo.requirements - newreqs:
1286 ui.write(_(b' removed: '))
1287 ui.write(_(b' removed: '))
1287 write_labeled(removed, "upgrade-repo.requirement.removed")
1288 write_labeled(removed, "upgrade-repo.requirement.removed")
1288 ui.write((b'\n'))
1289 ui.write((b'\n'))
1289 added = newreqs - repo.requirements
1290 added = newreqs - repo.requirements
1290 if added:
1291 if added:
1291 ui.write(_(b' added: '))
1292 ui.write(_(b' added: '))
1292 write_labeled(added, "upgrade-repo.requirement.added")
1293 write_labeled(added, "upgrade-repo.requirement.added")
1293 ui.write((b'\n'))
1294 ui.write((b'\n'))
1294 ui.write(b'\n')
1295 ui.write(b'\n')
1295
1296
1296 def printoptimisations():
1297 def printoptimisations():
1297 optimisations = [a for a in actions if a.type == optimisation]
1298 optimisations = [a for a in actions if a.type == optimisation]
1298 optimisations.sort(key=lambda a: a.name)
1299 optimisations.sort(key=lambda a: a.name)
1299 if optimisations:
1300 if optimisations:
1300 ui.write(_(b'optimisations: '))
1301 ui.write(_(b'optimisations: '))
1301 write_labeled(
1302 write_labeled(
1302 [a.name for a in optimisations],
1303 [a.name for a in optimisations],
1303 "upgrade-repo.optimisation.performed",
1304 "upgrade-repo.optimisation.performed",
1304 )
1305 )
1305 ui.write(b'\n\n')
1306 ui.write(b'\n\n')
1306
1307
1307 def printupgradeactions():
1308 def printupgradeactions():
1308 for a in actions:
1309 for a in actions:
1309 ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
1310 ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
1310
1311
1311 if not run:
1312 if not run:
1312 fromconfig = []
1313 fromconfig = []
1313 onlydefault = []
1314 onlydefault = []
1314
1315
1315 for d in deficiencies:
1316 for d in deficiencies:
1316 if d.fromconfig(repo):
1317 if d.fromconfig(repo):
1317 fromconfig.append(d)
1318 fromconfig.append(d)
1318 elif d.default:
1319 elif d.default:
1319 onlydefault.append(d)
1320 onlydefault.append(d)
1320
1321
1321 if fromconfig or onlydefault:
1322 if fromconfig or onlydefault:
1322
1323
1323 if fromconfig:
1324 if fromconfig:
1324 ui.status(
1325 ui.status(
1325 _(
1326 _(
1326 b'repository lacks features recommended by '
1327 b'repository lacks features recommended by '
1327 b'current config options:\n\n'
1328 b'current config options:\n\n'
1328 )
1329 )
1329 )
1330 )
1330 for i in fromconfig:
1331 for i in fromconfig:
1331 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1332 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1332
1333
1333 if onlydefault:
1334 if onlydefault:
1334 ui.status(
1335 ui.status(
1335 _(
1336 _(
1336 b'repository lacks features used by the default '
1337 b'repository lacks features used by the default '
1337 b'config options:\n\n'
1338 b'config options:\n\n'
1338 )
1339 )
1339 )
1340 )
1340 for i in onlydefault:
1341 for i in onlydefault:
1341 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1342 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1342
1343
1343 ui.status(b'\n')
1344 ui.status(b'\n')
1344 else:
1345 else:
1345 ui.status(
1346 ui.status(
1346 _(
1347 _(
1347 b'(no feature deficiencies found in existing '
1348 b'(no feature deficiencies found in existing '
1348 b'repository)\n'
1349 b'repository)\n'
1349 )
1350 )
1350 )
1351 )
1351
1352
1352 ui.status(
1353 ui.status(
1353 _(
1354 _(
1354 b'performing an upgrade with "--run" will make the following '
1355 b'performing an upgrade with "--run" will make the following '
1355 b'changes:\n\n'
1356 b'changes:\n\n'
1356 )
1357 )
1357 )
1358 )
1358
1359
1359 printrequirements()
1360 printrequirements()
1360 printoptimisations()
1361 printoptimisations()
1361 printupgradeactions()
1362 printupgradeactions()
1362
1363
1363 unusedoptimize = [i for i in alloptimizations if i not in actions]
1364 unusedoptimize = [i for i in alloptimizations if i not in actions]
1364
1365
1365 if unusedoptimize:
1366 if unusedoptimize:
1366 ui.status(
1367 ui.status(
1367 _(
1368 _(
1368 b'additional optimizations are available by specifying '
1369 b'additional optimizations are available by specifying '
1369 b'"--optimize <name>":\n\n'
1370 b'"--optimize <name>":\n\n'
1370 )
1371 )
1371 )
1372 )
1372 for i in unusedoptimize:
1373 for i in unusedoptimize:
1373 ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
1374 ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
1374 return
1375 return
1375
1376
1376 # Else we're in the run=true case.
1377 # Else we're in the run=true case.
1377 ui.write(_(b'upgrade will perform the following actions:\n\n'))
1378 ui.write(_(b'upgrade will perform the following actions:\n\n'))
1378 printrequirements()
1379 printrequirements()
1379 printoptimisations()
1380 printoptimisations()
1380 printupgradeactions()
1381 printupgradeactions()
1381
1382
1382 upgradeactions = [a.name for a in actions]
1383 upgradeactions = [a.name for a in actions]
1383
1384
1384 ui.status(_(b'beginning upgrade...\n'))
1385 ui.status(_(b'beginning upgrade...\n'))
1385 with repo.wlock(), repo.lock():
1386 with repo.wlock(), repo.lock():
1386 ui.status(_(b'repository locked and read-only\n'))
1387 ui.status(_(b'repository locked and read-only\n'))
1387 # Our strategy for upgrading the repository is to create a new,
1388 # Our strategy for upgrading the repository is to create a new,
1388 # temporary repository, write data to it, then do a swap of the
1389 # temporary repository, write data to it, then do a swap of the
1389 # data. There are less heavyweight ways to do this, but it is easier
1390 # data. There are less heavyweight ways to do this, but it is easier
1390 # to create a new repo object than to instantiate all the components
1391 # to create a new repo object than to instantiate all the components
1391 # (like the store) separately.
1392 # (like the store) separately.
1392 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
1393 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
1393 backuppath = None
1394 backuppath = None
1394 try:
1395 try:
1395 ui.status(
1396 ui.status(
1396 _(
1397 _(
1397 b'creating temporary repository to stage migrated '
1398 b'creating temporary repository to stage migrated '
1398 b'data: %s\n'
1399 b'data: %s\n'
1399 )
1400 )
1400 % tmppath
1401 % tmppath
1401 )
1402 )
1402
1403
1403 # clone ui without using ui.copy because repo.ui is protected
1404 # clone ui without using ui.copy because repo.ui is protected
1404 repoui = repo.ui.__class__(repo.ui)
1405 repoui = repo.ui.__class__(repo.ui)
1405 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1406 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1406
1407
1407 with dstrepo.wlock(), dstrepo.lock():
1408 with dstrepo.wlock(), dstrepo.lock():
1408 backuppath = _upgraderepo(
1409 backuppath = _upgraderepo(
1409 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
1410 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
1410 )
1411 )
1411 if not (backup or backuppath is None):
1412 if not (backup or backuppath is None):
1412 ui.status(
1413 ui.status(
1413 _(b'removing old repository content%s\n') % backuppath
1414 _(b'removing old repository content%s\n') % backuppath
1414 )
1415 )
1415 repo.vfs.rmtree(backuppath, forcibly=True)
1416 repo.vfs.rmtree(backuppath, forcibly=True)
1416 backuppath = None
1417 backuppath = None
1417
1418
1418 finally:
1419 finally:
1419 ui.status(_(b'removing temporary repository %s\n') % tmppath)
1420 ui.status(_(b'removing temporary repository %s\n') % tmppath)
1420 repo.vfs.rmtree(tmppath, forcibly=True)
1421 repo.vfs.rmtree(tmppath, forcibly=True)
1421
1422
1422 if backuppath and not ui.quiet:
1423 if backuppath and not ui.quiet:
1423 ui.warn(
1424 ui.warn(
1424 _(b'copy of old repository backed up at %s\n') % backuppath
1425 _(b'copy of old repository backed up at %s\n') % backuppath
1425 )
1426 )
1426 ui.warn(
1427 ui.warn(
1427 _(
1428 _(
1428 b'the old repository will not be deleted; remove '
1429 b'the old repository will not be deleted; remove '
1429 b'it to free up disk space once the upgraded '
1430 b'it to free up disk space once the upgraded '
1430 b'repository is verified\n'
1431 b'repository is verified\n'
1431 )
1432 )
1432 )
1433 )
@@ -1,189 +1,221 b''
1 setup
1 setup
2
2
3 $ cat >> $HGRCPATH <<EOF
3 $ cat >> $HGRCPATH <<EOF
4 > [extensions]
4 > [extensions]
5 > share =
5 > share =
6 > [format]
6 > [format]
7 > exp-share-safe = True
7 > exp-share-safe = True
8 > EOF
8 > EOF
9
9
10 prepare source repo
10 prepare source repo
11
11
12 $ hg init source
12 $ hg init source
13 $ cd source
13 $ cd source
14 $ cat .hg/requires
14 $ cat .hg/requires
15 exp-sharesafe
15 exp-sharesafe
16 $ cat .hg/store/requires
16 $ cat .hg/store/requires
17 dotencode
17 dotencode
18 fncache
18 fncache
19 generaldelta
19 generaldelta
20 revlogv1
20 revlogv1
21 sparserevlog
21 sparserevlog
22 store
22 store
23 $ hg debugrequirements
23 $ hg debugrequirements
24 dotencode
24 dotencode
25 exp-sharesafe
25 exp-sharesafe
26 fncache
26 fncache
27 generaldelta
27 generaldelta
28 revlogv1
28 revlogv1
29 sparserevlog
29 sparserevlog
30 store
30 store
31
31
32 $ echo a > a
32 $ echo a > a
33 $ hg ci -Aqm "added a"
33 $ hg ci -Aqm "added a"
34 $ echo b > b
34 $ echo b > b
35 $ hg ci -Aqm "added b"
35 $ hg ci -Aqm "added b"
36
36
37 $ HGEDITOR=cat hg config --shared
37 $ HGEDITOR=cat hg config --shared
38 abort: repository is not shared; can't use --shared
38 abort: repository is not shared; can't use --shared
39 [255]
39 [255]
40 $ cd ..
40 $ cd ..
41
41
42 Create a shared repo and check the requirements are shared and read correctly
42 Create a shared repo and check the requirements are shared and read correctly
43 $ hg share source shared1
43 $ hg share source shared1
44 updating working directory
44 updating working directory
45 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
45 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 $ cd shared1
46 $ cd shared1
47 $ cat .hg/requires
47 $ cat .hg/requires
48 exp-sharesafe
48 exp-sharesafe
49 shared
49 shared
50
50
51 $ hg debugrequirements -R ../source
51 $ hg debugrequirements -R ../source
52 dotencode
52 dotencode
53 exp-sharesafe
53 exp-sharesafe
54 fncache
54 fncache
55 generaldelta
55 generaldelta
56 revlogv1
56 revlogv1
57 sparserevlog
57 sparserevlog
58 store
58 store
59
59
60 $ hg debugrequirements
60 $ hg debugrequirements
61 dotencode
61 dotencode
62 exp-sharesafe
62 exp-sharesafe
63 fncache
63 fncache
64 generaldelta
64 generaldelta
65 revlogv1
65 revlogv1
66 shared
66 shared
67 sparserevlog
67 sparserevlog
68 store
68 store
69
69
70 $ echo c > c
70 $ echo c > c
71 $ hg ci -Aqm "added c"
71 $ hg ci -Aqm "added c"
72
72
73 Check that config of the source repository is also loaded
73 Check that config of the source repository is also loaded
74
74
75 $ hg showconfig ui.curses
75 $ hg showconfig ui.curses
76 [1]
76 [1]
77
77
78 $ echo "[ui]" >> ../source/.hg/hgrc
78 $ echo "[ui]" >> ../source/.hg/hgrc
79 $ echo "curses=true" >> ../source/.hg/hgrc
79 $ echo "curses=true" >> ../source/.hg/hgrc
80
80
81 $ hg showconfig ui.curses
81 $ hg showconfig ui.curses
82 true
82 true
83
83
84 However, local .hg/hgrc should override the config set by share source
84 However, local .hg/hgrc should override the config set by share source
85
85
86 $ echo "[ui]" >> .hg/hgrc
86 $ echo "[ui]" >> .hg/hgrc
87 $ echo "curses=false" >> .hg/hgrc
87 $ echo "curses=false" >> .hg/hgrc
88
88
89 $ hg showconfig ui.curses
89 $ hg showconfig ui.curses
90 false
90 false
91
91
92 $ HGEDITOR=cat hg config --shared
92 $ HGEDITOR=cat hg config --shared
93 [ui]
93 [ui]
94 curses=true
94 curses=true
95
95
96 $ HGEDITOR=cat hg config --local
96 $ HGEDITOR=cat hg config --local
97 [ui]
97 [ui]
98 curses=false
98 curses=false
99
99
100 Testing that hooks set in source repository also runs in shared repo
100 Testing that hooks set in source repository also runs in shared repo
101
101
102 $ cd ../source
102 $ cd ../source
103 $ cat <<EOF >> .hg/hgrc
103 $ cat <<EOF >> .hg/hgrc
104 > [extensions]
104 > [extensions]
105 > hooklib=
105 > hooklib=
106 > [hooks]
106 > [hooks]
107 > pretxnchangegroup.reject_merge_commits = \
107 > pretxnchangegroup.reject_merge_commits = \
108 > python:hgext.hooklib.reject_merge_commits.hook
108 > python:hgext.hooklib.reject_merge_commits.hook
109 > EOF
109 > EOF
110
110
111 $ cd ..
111 $ cd ..
112 $ hg clone source cloned
112 $ hg clone source cloned
113 updating to branch default
113 updating to branch default
114 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
114 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
115 $ cd cloned
115 $ cd cloned
116 $ hg up 0
116 $ hg up 0
117 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
117 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
118 $ echo bar > bar
118 $ echo bar > bar
119 $ hg ci -Aqm "added bar"
119 $ hg ci -Aqm "added bar"
120 $ hg merge
120 $ hg merge
121 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
121 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
122 (branch merge, don't forget to commit)
122 (branch merge, don't forget to commit)
123 $ hg ci -m "merge commit"
123 $ hg ci -m "merge commit"
124
124
125 $ hg push ../source
125 $ hg push ../source
126 pushing to ../source
126 pushing to ../source
127 searching for changes
127 searching for changes
128 adding changesets
128 adding changesets
129 adding manifests
129 adding manifests
130 adding file changes
130 adding file changes
131 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
131 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
132 transaction abort!
132 transaction abort!
133 rollback completed
133 rollback completed
134 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
134 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
135 [255]
135 [255]
136
136
137 $ hg push ../shared1
137 $ hg push ../shared1
138 pushing to ../shared1
138 pushing to ../shared1
139 searching for changes
139 searching for changes
140 adding changesets
140 adding changesets
141 adding manifests
141 adding manifests
142 adding file changes
142 adding file changes
143 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
143 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
144 transaction abort!
144 transaction abort!
145 rollback completed
145 rollback completed
146 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
146 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
147 [255]
147 [255]
148
148
149 Test that if share source config is untrusted, we dont read it
149 Test that if share source config is untrusted, we dont read it
150
150
151 $ cd ../shared1
151 $ cd ../shared1
152
152
153 $ cat << EOF > $TESTTMP/untrusted.py
153 $ cat << EOF > $TESTTMP/untrusted.py
154 > from mercurial import scmutil, util
154 > from mercurial import scmutil, util
155 > def uisetup(ui):
155 > def uisetup(ui):
156 > class untrustedui(ui.__class__):
156 > class untrustedui(ui.__class__):
157 > def _trusted(self, fp, f):
157 > def _trusted(self, fp, f):
158 > if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
158 > if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
159 > return False
159 > return False
160 > return super(untrustedui, self)._trusted(fp, f)
160 > return super(untrustedui, self)._trusted(fp, f)
161 > ui.__class__ = untrustedui
161 > ui.__class__ = untrustedui
162 > EOF
162 > EOF
163
163
164 $ hg showconfig hooks
164 $ hg showconfig hooks
165 hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook
165 hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook
166
166
167 $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
167 $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
168 [1]
168 [1]
169
169
170 Update the source repository format and check that shared repo works
171
172 $ cd ../source
173 $ echo "[format]" >> .hg/hgrc
174 $ echo "revlog-compression=zstd" >> .hg/hgrc
175
176 $ hg debugupgraderepo --run -q -R ../shared1
177 abort: cannot upgrade repository; unsupported source requirement: shared
178 [255]
179
180 $ hg debugupgraderepo --run -q
181 upgrade will perform the following actions:
182
183 requirements
184 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store
185 added: revlog-compression-zstd
186
187 $ hg log -r .
188 changeset: 1:5f6d8a4bf34a
189 user: test
190 date: Thu Jan 01 00:00:00 1970 +0000
191 summary: added b
192
193 Shared one should work
194 $ cd ../shared1
195 $ hg log -r .
196 changeset: 2:155349b645be
197 tag: tip
198 user: test
199 date: Thu Jan 01 00:00:00 1970 +0000
200 summary: added c
201
170 Unsharing works
202 Unsharing works
171
203
172 $ hg unshare
204 $ hg unshare
173
205
174 Test that source config is added to the shared one after unshare, and the config
206 Test that source config is added to the shared one after unshare, and the config
175 of current repo is still respected over the config which came from source config
207 of current repo is still respected over the config which came from source config
176 $ cd ../cloned
208 $ cd ../cloned
177 $ hg push ../shared1
209 $ hg push ../shared1
178 pushing to ../shared1
210 pushing to ../shared1
179 searching for changes
211 searching for changes
180 adding changesets
212 adding changesets
181 adding manifests
213 adding manifests
182 adding file changes
214 adding file changes
183 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
215 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
184 transaction abort!
216 transaction abort!
185 rollback completed
217 rollback completed
186 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
218 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
187 [255]
219 [255]
188 $ hg showconfig ui.curses -R ../shared1
220 $ hg showconfig ui.curses -R ../shared1
189 false
221 false
General Comments 0
You need to be logged in to leave comments. Login now