@@ -37,11 +37,14 @@ from mercurial import (
     scmutil,
     smartset,
     subrepo,
-    upgrade,
     url as urlmod,
     util,
 )
 
+from mercurial.upgrade_utils import (
+    actions as upgrade_actions,
+)
+
 from . import (
     lfcommands,
     lfutil,
@@ -1837,8 +1840,8 @@ def scmutilmarktouched(orig, repo, files
     return result
 
 
-@eh.wrapfunction(upgrade, b'preservedrequirements')
-@eh.wrapfunction(upgrade, b'supporteddestrequirements')
+@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
+@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
 def upgraderequirements(orig, repo):
     reqs = orig(repo)
     if b'largefiles' in repo.requirements:
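
For context, a minimal sketch of the wrapping pattern this hunk relies on: after the move, extensions register their requirement hooks on upgrade_actions instead of upgrade. The extension name b'myext' and the uisetup hook below are illustrative only and are not part of this patch.

# Illustrative only: a hypothetical extension adapting to the new module layout.
from mercurial import extensions
from mercurial.upgrade_utils import actions as upgrade_actions

def _keepmyrequirement(orig, repo):
    # Preserve/permit the extension's requirement across an upgrade.
    reqs = orig(repo)
    if b'myext' in repo.requirements:  # b'myext' is a made-up requirement name
        reqs.add(b'myext')
    return reqs

def uisetup(ui):
    # Same hooks largefiles wraps above, here without the exthelper decorator.
    extensions.wrapfunction(
        upgrade_actions, b'preservedrequirements', _keepmyrequirement
    )
    extensions.wrapfunction(
        upgrade_actions, b'supporteddestrequirements', _keepmyrequirement
    )
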
@@ -28,13 +28,15 @@ from mercurial import (
     pycompat,
     revlog,
     scmutil,
-    upgrade,
     util,
     vfs as vfsmod,
     wireprotov1server,
 )
 
-from mercurial.upgrade_utils import engine as upgrade_engine
+from mercurial.upgrade_utils import (
+    actions as upgrade_actions,
+    engine as upgrade_engine,
+)
 
 from mercurial.interfaces import repository
 
@@ -539,8 +541,8 @@ def upgradefinishdatamigration(orig, ui,
                 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
 
 
-@eh.wrapfunction(upgrade, b'preservedrequirements')
-@eh.wrapfunction(upgrade, b'supporteddestrequirements')
+@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
+@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
 def upgraderequirements(orig, repo):
     reqs = orig(repo)
     if b'lfs' in repo.requirements:
@@ -13,536 +13,14 @@ from . import (
     hg,
     localrepo,
     pycompat,
-    requirements,
-    util,
 )
 
 from .upgrade_utils import (
+    actions as upgrade_actions,
     engine as upgrade_engine,
 )
 
-from .utils import compression
-
-# list of requirements that request a clone of all revlog if added/removed
-RECLONES_REQUIREMENTS = {
-    b'generaldelta',
-    requirements.SPARSEREVLOG_REQUIREMENT,
-}
-
-
-def requiredsourcerequirements(repo):
-    """Obtain requirements required to be present to upgrade a repo.
-
-    An upgrade will not be allowed if the repository doesn't have the
-    requirements returned by this function.
-    """
-    return {
-        # Introduced in Mercurial 0.9.2.
-        b'revlogv1',
-        # Introduced in Mercurial 0.9.2.
-        b'store',
-    }
-
-
-def blocksourcerequirements(repo):
-    """Obtain requirements that will prevent an upgrade from occurring.
-
-    An upgrade cannot be performed if the source repository contains a
-    requirements in the returned set.
-    """
-    return {
-        # The upgrade code does not yet support these experimental features.
-        # This is an artificial limitation.
-        requirements.TREEMANIFEST_REQUIREMENT,
-        # This was a precursor to generaldelta and was never enabled by default.
-        # It should (hopefully) not exist in the wild.
-        b'parentdelta',
-        # Upgrade should operate on the actual store, not the shared link.
-        requirements.SHARED_REQUIREMENT,
-    }
-
-
-def supportremovedrequirements(repo):
-    """Obtain requirements that can be removed during an upgrade.
-
-    If an upgrade were to create a repository that dropped a requirement,
-    the dropped requirement must appear in the returned set for the upgrade
-    to be allowed.
-    """
-    supported = {
-        requirements.SPARSEREVLOG_REQUIREMENT,
-        requirements.SIDEDATA_REQUIREMENT,
-        requirements.COPIESSDC_REQUIREMENT,
-        requirements.NODEMAP_REQUIREMENT,
-        requirements.SHARESAFE_REQUIREMENT,
-    }
-    for name in compression.compengines:
-        engine = compression.compengines[name]
-        if engine.available() and engine.revlogheader():
-            supported.add(b'exp-compression-%s' % name)
-            if engine.name() == b'zstd':
-                supported.add(b'revlog-compression-zstd')
-    return supported
-
-
-def supporteddestrequirements(repo):
-    """Obtain requirements that upgrade supports in the destination.
-
-    If the result of the upgrade would create requirements not in this set,
-    the upgrade is disallowed.
-
-    Extensions should monkeypatch this to add their custom requirements.
-    """
-    supported = {
-        b'dotencode',
-        b'fncache',
-        b'generaldelta',
-        b'revlogv1',
-        b'store',
-        requirements.SPARSEREVLOG_REQUIREMENT,
-        requirements.SIDEDATA_REQUIREMENT,
-        requirements.COPIESSDC_REQUIREMENT,
-        requirements.NODEMAP_REQUIREMENT,
-        requirements.SHARESAFE_REQUIREMENT,
-    }
-    for name in compression.compengines:
-        engine = compression.compengines[name]
-        if engine.available() and engine.revlogheader():
-            supported.add(b'exp-compression-%s' % name)
-            if engine.name() == b'zstd':
-                supported.add(b'revlog-compression-zstd')
-    return supported
-
-
-def allowednewrequirements(repo):
-    """Obtain requirements that can be added to a repository during upgrade.
-
-    This is used to disallow proposed requirements from being added when
-    they weren't present before.
-
-    We use a list of allowed requirement additions instead of a list of known
-    bad additions because the whitelist approach is safer and will prevent
-    future, unknown requirements from accidentally being added.
-    """
-    supported = {
-        b'dotencode',
-        b'fncache',
-        b'generaldelta',
-        requirements.SPARSEREVLOG_REQUIREMENT,
-        requirements.SIDEDATA_REQUIREMENT,
-        requirements.COPIESSDC_REQUIREMENT,
-        requirements.NODEMAP_REQUIREMENT,
-        requirements.SHARESAFE_REQUIREMENT,
-    }
-    for name in compression.compengines:
-        engine = compression.compengines[name]
-        if engine.available() and engine.revlogheader():
-            supported.add(b'exp-compression-%s' % name)
-            if engine.name() == b'zstd':
-                supported.add(b'revlog-compression-zstd')
-    return supported
-
-
-def preservedrequirements(repo):
-    return set()
-
-
-DEFICIENCY = b'deficiency'
-OPTIMISATION = b'optimization'
-
-
-class improvement(object):
-    """Represents an improvement that can be made as part of an upgrade.
-
-    The following attributes are defined on each instance:
-
-    name
-       Machine-readable string uniquely identifying this improvement. It
-       will be mapped to an action later in the upgrade process.
-
-    type
-       Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
-       problem. An optimization is an action (sometimes optional) that
-       can be taken to further improve the state of the repository.
-
-    description
-       Message intended for humans explaining the improvement in more detail,
-       including the implications of it. For ``DEFICIENCY`` types, should be
-       worded in the present tense. For ``OPTIMISATION`` types, should be
-       worded in the future tense.
-
-    upgrademessage
-       Message intended for humans explaining what an upgrade addressing this
-       issue will do. Should be worded in the future tense.
-    """
-
-    def __init__(self, name, type, description, upgrademessage):
-        self.name = name
-        self.type = type
-        self.description = description
-        self.upgrademessage = upgrademessage
-
-    def __eq__(self, other):
-        if not isinstance(other, improvement):
-            # This is what python tell use to do
-            return NotImplemented
-        return self.name == other.name
-
-    def __ne__(self, other):
-        return not (self == other)
-
-    def __hash__(self):
-        return hash(self.name)
-
-
-allformatvariant = []
-
-
-def registerformatvariant(cls):
-    allformatvariant.append(cls)
-    return cls
-
-
-class formatvariant(improvement):
-    """an improvement subclass dedicated to repository format"""
-
-    type = DEFICIENCY
-    ### The following attributes should be defined for each class:
-
-    # machine-readable string uniquely identifying this improvement. it will be
-    # mapped to an action later in the upgrade process.
-    name = None
-
-    # message intended for humans explaining the improvement in more detail,
-    # including the implications of it ``DEFICIENCY`` types, should be worded
-    # in the present tense.
-    description = None
-
-    # message intended for humans explaining what an upgrade addressing this
-    # issue will do. should be worded in the future tense.
-    upgrademessage = None
-
-    # value of current Mercurial default for new repository
-    default = None
-
-    def __init__(self):
-        raise NotImplementedError()
-
-    @staticmethod
-    def fromrepo(repo):
-        """current value of the variant in the repository"""
-        raise NotImplementedError()
-
-    @staticmethod
-    def fromconfig(repo):
-        """current value of the variant in the configuration"""
-        raise NotImplementedError()
-
-
-class requirementformatvariant(formatvariant):
-    """formatvariant based on a 'requirement' name.
-
-    Many format variant are controlled by a 'requirement'. We define a small
-    subclass to factor the code.
-    """
-
-    # the requirement that control this format variant
-    _requirement = None
-
-    @staticmethod
-    def _newreporequirements(ui):
-        return localrepo.newreporequirements(
-            ui, localrepo.defaultcreateopts(ui)
-        )
-
-    @classmethod
-    def fromrepo(cls, repo):
-        assert cls._requirement is not None
-        return cls._requirement in repo.requirements
-
-    @classmethod
-    def fromconfig(cls, repo):
-        assert cls._requirement is not None
-        return cls._requirement in cls._newreporequirements(repo.ui)
-
-
-@registerformatvariant
-class fncache(requirementformatvariant):
-    name = b'fncache'
-
-    _requirement = b'fncache'
-
-    default = True
-
-    description = _(
-        b'long and reserved filenames may not work correctly; '
-        b'repository performance is sub-optimal'
-    )
-
-    upgrademessage = _(
-        b'repository will be more resilient to storing '
-        b'certain paths and performance of certain '
-        b'operations should be improved'
-    )
-
-
-@registerformatvariant
-class dotencode(requirementformatvariant):
-    name = b'dotencode'
-
-    _requirement = b'dotencode'
-
-    default = True
-
-    description = _(
-        b'storage of filenames beginning with a period or '
-        b'space may not work correctly'
-    )
-
-    upgrademessage = _(
-        b'repository will be better able to store files '
-        b'beginning with a space or period'
-    )
-
-
-@registerformatvariant
-class generaldelta(requirementformatvariant):
-    name = b'generaldelta'
-
-    _requirement = b'generaldelta'
-
-    default = True
-
-    description = _(
-        b'deltas within internal storage are unable to '
-        b'choose optimal revisions; repository is larger and '
-        b'slower than it could be; interaction with other '
-        b'repositories may require extra network and CPU '
-        b'resources, making "hg push" and "hg pull" slower'
-    )
-
-    upgrademessage = _(
-        b'repository storage will be able to create '
-        b'optimal deltas; new repository data will be '
-        b'smaller and read times should decrease; '
-        b'interacting with other repositories using this '
-        b'storage model should require less network and '
-        b'CPU resources, making "hg push" and "hg pull" '
-        b'faster'
-    )
-
-
-@registerformatvariant
-class sharedsafe(requirementformatvariant):
-    name = b'exp-sharesafe'
-    _requirement = requirements.SHARESAFE_REQUIREMENT
-
-    default = False
-
-    description = _(
-        b'old shared repositories do not share source repository '
-        b'requirements and config. This leads to various problems '
-        b'when the source repository format is upgraded or some new '
-        b'extensions are enabled.'
-    )
-
-    upgrademessage = _(
-        b'Upgrades a repository to share-safe format so that future '
-        b'shares of this repository share its requirements and configs.'
-    )
-
-
-@registerformatvariant
-class sparserevlog(requirementformatvariant):
-    name = b'sparserevlog'
-
-    _requirement = requirements.SPARSEREVLOG_REQUIREMENT
-
-    default = True
-
-    description = _(
-        b'in order to limit disk reading and memory usage on older '
-        b'version, the span of a delta chain from its root to its '
-        b'end is limited, whatever the relevant data in this span. '
-        b'This can severly limit Mercurial ability to build good '
-        b'chain of delta resulting is much more storage space being '
-        b'taken and limit reusability of on disk delta during '
-        b'exchange.'
-    )
-
-    upgrademessage = _(
-        b'Revlog supports delta chain with more unused data '
-        b'between payload. These gaps will be skipped at read '
-        b'time. This allows for better delta chains, making a '
-        b'better compression and faster exchange with server.'
-    )
-
-
-@registerformatvariant
-class sidedata(requirementformatvariant):
-    name = b'sidedata'
-
-    _requirement = requirements.SIDEDATA_REQUIREMENT
-
-    default = False
-
-    description = _(
-        b'Allows storage of extra data alongside a revision, '
-        b'unlocking various caching options.'
-    )
-
-    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
-
-
-@registerformatvariant
-class persistentnodemap(requirementformatvariant):
-    name = b'persistent-nodemap'
-
-    _requirement = requirements.NODEMAP_REQUIREMENT
-
-    default = False
-
-    description = _(
-        b'persist the node -> rev mapping on disk to speedup lookup'
-    )
-
-    upgrademessage = _(b'Speedup revision lookup by node id.')
-
-
-@registerformatvariant
-class copiessdc(requirementformatvariant):
-    name = b'copies-sdc'
-
-    _requirement = requirements.COPIESSDC_REQUIREMENT
-
-    default = False
-
-    description = _(b'Stores copies information alongside changesets.')
-
-    upgrademessage = _(
-        b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
-    )
-
-
-@registerformatvariant
-class removecldeltachain(formatvariant):
-    name = b'plain-cl-delta'
-
-    default = True
-
-    description = _(
-        b'changelog storage is using deltas instead of '
-        b'raw entries; changelog reading and any '
-        b'operation relying on changelog data are slower '
-        b'than they could be'
-    )
-
-    upgrademessage = _(
-        b'changelog storage will be reformated to '
-        b'store raw entries; changelog reading will be '
-        b'faster; changelog size may be reduced'
-    )
-
-    @staticmethod
-    def fromrepo(repo):
-        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
-        # changelogs with deltas.
-        cl = repo.changelog
-        chainbase = cl.chainbase
-        return all(rev == chainbase(rev) for rev in cl)
-
-    @staticmethod
-    def fromconfig(repo):
-        return True
-
-
-@registerformatvariant
-class compressionengine(formatvariant):
-    name = b'compression'
-    default = b'zlib'
-
-    description = _(
-        b'Compresion algorithm used to compress data. '
-        b'Some engine are faster than other'
-    )
-
-    upgrademessage = _(
-        b'revlog content will be recompressed with the new algorithm.'
-    )
-
-    @classmethod
-    def fromrepo(cls, repo):
-        # we allow multiple compression engine requirement to co-exist because
-        # strickly speaking, revlog seems to support mixed compression style.
-        #
-        # The compression used for new entries will be "the last one"
-        compression = b'zlib'
-        for req in repo.requirements:
-            prefix = req.startswith
-            if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
-                compression = req.split(b'-', 2)[2]
-        return compression
-
-    @classmethod
-    def fromconfig(cls, repo):
-        compengines = repo.ui.configlist(b'format', b'revlog-compression')
-        # return the first valid value as the selection code would do
-        for comp in compengines:
-            if comp in util.compengines:
-                return comp
-
-        # no valide compression found lets display it all for clarity
-        return b','.join(compengines)
-
-
-@registerformatvariant
-class compressionlevel(formatvariant):
-    name = b'compression-level'
-    default = b'default'
-
-    description = _(b'compression level')
-
-    upgrademessage = _(b'revlog content will be recompressed')
-
-    @classmethod
-    def fromrepo(cls, repo):
-        comp = compressionengine.fromrepo(repo)
-        level = None
-        if comp == b'zlib':
-            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
-        elif comp == b'zstd':
-            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
-        if level is None:
-            return b'default'
-        return bytes(level)
-
-    @classmethod
-    def fromconfig(cls, repo):
-        comp = compressionengine.fromconfig(repo)
-        level = None
-        if comp == b'zlib':
-            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
-        elif comp == b'zstd':
-            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
-        if level is None:
-            return b'default'
-        return bytes(level)
-
-
-def finddeficiencies(repo):
-    """returns a list of deficiencies that the repo suffer from"""
-    deficiencies = []
-
-    # We could detect lack of revlogv1 and store here, but they were added
-    # in 0.9.2 and we don't support upgrading repos without these
-    # requirements, so let's not bother.
-
-    for fv in allformatvariant:
-        if not fv.fromrepo(repo):
-            deficiencies.append(fv)
-
-    return deficiencies
-
+allformatvariant = upgrade_actions.allformatvariant
 
 # search without '-' to support older form on newer client.
 #
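
For reviewers following the move: the re-exported allformatvariant binding keeps the old attribute reachable from this module. A minimal sketch (based only on the code shown above, not part of the patch) of how the relocated registry is consumed, mirroring finddeficiencies():

from mercurial.upgrade_utils import actions as upgrade_actions

def list_missing_format_variants(repo):
    """Return names of registered format variants the repo lacks."""
    return [
        fv.name
        for fv in upgrade_actions.allformatvariant
        if not fv.fromrepo(repo)
    ]
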
@@ -557,134 +35,6 @@ legacy_opts_map = {
     b'redeltafulladd': b're-delta-fulladd',
 }
 
-ALL_OPTIMISATIONS = []
-
-
-def register_optimization(obj):
-    ALL_OPTIMISATIONS.append(obj)
-    return obj
-
-
-register_optimization(
-    improvement(
-        name=b're-delta-parent',
-        type=OPTIMISATION,
-        description=_(
-            b'deltas within internal storage will be recalculated to '
-            b'choose an optimal base revision where this was not '
-            b'already done; the size of the repository may shrink and '
-            b'various operations may become faster; the first time '
-            b'this optimization is performed could slow down upgrade '
-            b'execution considerably; subsequent invocations should '
-            b'not run noticeably slower'
-        ),
-        upgrademessage=_(
-            b'deltas within internal storage will choose a new '
-            b'base revision if needed'
-        ),
-    )
-)
-
-register_optimization(
-    improvement(
-        name=b're-delta-multibase',
-        type=OPTIMISATION,
-        description=_(
-            b'deltas within internal storage will be recalculated '
-            b'against multiple base revision and the smallest '
-            b'difference will be used; the size of the repository may '
-            b'shrink significantly when there are many merges; this '
-            b'optimization will slow down execution in proportion to '
-            b'the number of merges in the repository and the amount '
-            b'of files in the repository; this slow down should not '
-            b'be significant unless there are tens of thousands of '
-            b'files and thousands of merges'
-        ),
-        upgrademessage=_(
-            b'deltas within internal storage will choose an '
-            b'optimal delta by computing deltas against multiple '
-            b'parents; may slow down execution time '
-            b'significantly'
-        ),
-    )
-)
-
-register_optimization(
-    improvement(
-        name=b're-delta-all',
-        type=OPTIMISATION,
-        description=_(
-            b'deltas within internal storage will always be '
-            b'recalculated without reusing prior deltas; this will '
-            b'likely make execution run several times slower; this '
-            b'optimization is typically not needed'
-        ),
-        upgrademessage=_(
-            b'deltas within internal storage will be fully '
-            b'recomputed; this will likely drastically slow down '
-            b'execution time'
-        ),
-    )
-)
-
-register_optimization(
-    improvement(
-        name=b're-delta-fulladd',
-        type=OPTIMISATION,
-        description=_(
-            b'every revision will be re-added as if it was new '
-            b'content. It will go through the full storage '
-            b'mechanism giving extensions a chance to process it '
-            b'(eg. lfs). This is similar to "re-delta-all" but even '
-            b'slower since more logic is involved.'
-        ),
-        upgrademessage=_(
-            b'each revision will be added as new content to the '
-            b'internal storage; this will likely drastically slow '
-            b'down execution time, but some extensions might need '
-            b'it'
-        ),
-    )
-)
-
-
-def findoptimizations(repo):
-    """Determine optimisation that could be used during upgrade"""
-    # These are unconditionally added. There is logic later that figures out
-    # which ones to apply.
-    return list(ALL_OPTIMISATIONS)
-
-
-def determineactions(repo, deficiencies, sourcereqs, destreqs):
-    """Determine upgrade actions that will be performed.
-
-    Given a list of improvements as returned by ``finddeficiencies`` and
-    ``findoptimizations``, determine the list of upgrade actions that
-    will be performed.
-
-    The role of this function is to filter improvements if needed, apply
-    recommended optimizations from the improvements list that make sense,
-    etc.
-
-    Returns a list of action names.
-    """
-    newactions = []
-
-    for d in deficiencies:
-        name = d._requirement
-
-        # If the action is a requirement that doesn't show up in the
-        # destination requirements, prune the action.
-        if name is not None and name not in destreqs:
-            continue
-
-        newactions.append(d)
-
-    # FUTURE consider adding some optimizations here for certain transitions.
-    # e.g. adding generaldelta could schedule parent redeltas.
-
-    return newactions
-
 
 def upgraderepo(
     ui,
@@ -722,14 +72,18 @@ def upgraderepo(
                 revlogs.discard(upgrade)
 
     # Ensure the repository can be upgraded.
-    missingreqs = requiredsourcerequirements(repo) - repo.requirements
+    missingreqs = (
+        upgrade_actions.requiredsourcerequirements(repo) - repo.requirements
+    )
     if missingreqs:
         raise error.Abort(
             _(b'cannot upgrade repository; requirement missing: %s')
             % _(b', ').join(sorted(missingreqs))
         )
 
-    blockedreqs = blocksourcerequirements(repo) & repo.requirements
+    blockedreqs = (
+        upgrade_actions.blocksourcerequirements(repo) & repo.requirements
+    )
     if blockedreqs:
         raise error.Abort(
             _(
@@ -744,10 +98,12 @@ def upgraderepo(
     newreqs = localrepo.newreporequirements(
         repo.ui, localrepo.defaultcreateopts(repo.ui)
     )
-    newreqs.update(preservedrequirements(repo))
+    newreqs.update(upgrade_actions.preservedrequirements(repo))
 
     noremovereqs = (
-        repo.requirements - newreqs - supportremovedrequirements(repo)
+        repo.requirements
+        - newreqs
+        - upgrade_actions.supportremovedrequirements(repo)
     )
     if noremovereqs:
         raise error.Abort(
@@ -758,7 +114,11 @@ def upgraderepo(
             % _(b', ').join(sorted(noremovereqs))
         )
 
-    noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
+    noaddreqs = (
+        newreqs
+        - repo.requirements
+        - upgrade_actions.allowednewrequirements(repo)
+    )
     if noaddreqs:
         raise error.Abort(
             _(
@@ -768,7 +128,7 @@ def upgraderepo(
             % _(b', ').join(sorted(noaddreqs))
         )
 
-    unsupportedreqs = newreqs - supporteddestrequirements(repo)
+    unsupportedreqs = newreqs - upgrade_actions.supporteddestrequirements(repo)
     if unsupportedreqs:
         raise error.Abort(
             _(
@@ -779,7 +139,7 @@ def upgraderepo(
         )
 
     # Find and validate all improvements that can be made.
-    alloptimizations = findoptimizations(repo)
+    alloptimizations = upgrade_actions.findoptimizations(repo)
 
     # Apply and Validate arguments.
     optimizations = []
@@ -795,8 +155,10 @@ def upgraderepo(
             hint=_(b'run without arguments to see valid optimizations'),
         )
 
-    deficiencies = finddeficiencies(repo)
-    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
+    deficiencies = upgrade_actions.finddeficiencies(repo)
+    actions = upgrade_actions.determineactions(
+        repo, deficiencies, repo.requirements, newreqs
+    )
     actions.extend(
         o
         for o in sorted(optimizations)
@@ -808,7 +170,9 @@ def upgraderepo(
     addedreqs = newreqs - repo.requirements
 
     if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
-        incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
+        incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
+            removedreqs | addedreqs
+        )
         if incompatible:
             msg = _(
                 b'ignoring revlogs selection flags, format requirements '
@@ -845,7 +209,9 @@ def upgraderepo(
         ui.write(b'\n')
 
     def printoptimisations():
-        optimisations = [a for a in actions if a.type == OPTIMISATION]
+        optimisations = [
+            a for a in actions if a.type == upgrade_actions.OPTIMISATION
+        ]
         optimisations.sort(key=lambda a: a.name)
         if optimisations:
             ui.write(_(b'optimisations: '))
@@ -993,7 +359,7 @@ def upgraderepo(
                 )
             )
 
-        if sharedsafe.name in addedreqs:
+        if upgrade_actions.sharesafe.name in addedreqs:
             ui.warn(
                 _(
                     b'repository upgraded to share safe mode, existing'
@@ -1002,7 +368,7 @@ def upgraderepo(
                     b' New shares will be created in safe mode.\n'
                 )
             )
-        if sharedsafe.name in removedreqs:
+        if upgrade_actions.sharesafe.name in removedreqs:
             ui.warn(
                 _(
                     b'repository downgraded to not use share safe mode, '
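
Taken together, the hunks above reduce upgraderepo()'s requirement validation to set arithmetic over helpers that now live in upgrade_actions. A condensed, illustrative sketch of that flow (the _validate_requirements name and the abbreviated messages are mine, not the patch's):

from mercurial import error, localrepo
from mercurial.upgrade_utils import actions as upgrade_actions

def _validate_requirements(repo):
    # Requirements a freshly created repo would get, plus anything preserved.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui)
    )
    newreqs.update(upgrade_actions.preservedrequirements(repo))

    checks = [
        (b'requirement missing',
         upgrade_actions.requiredsourcerequirements(repo) - repo.requirements),
        (b'unsupported source requirement',
         upgrade_actions.blocksourcerequirements(repo) & repo.requirements),
        (b'requirement would be removed',
         repo.requirements - newreqs
         - upgrade_actions.supportremovedrequirements(repo)),
        (b'do not support adding requirement',
         newreqs - repo.requirements
         - upgrade_actions.allowednewrequirements(repo)),
        (b'do not support destination requirement',
         newreqs - upgrade_actions.supporteddestrequirements(repo)),
    ]
    for msg, offenders in checks:
        if offenders:
            raise error.Abort(b'%s: %s' % (msg, b', '.join(sorted(offenders))))
    return newreqs
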
@@ -7,21 +7,14 @@
 
 from __future__ import absolute_import
 
-from .i18n import _
-from . import (
-    error,
-    hg,
+from ..i18n import _
+from .. import (
     localrepo,
-    pycompat,
     requirements,
     util,
 )
 
-from .upgrade_utils import (
-    engine as upgrade_engine,
-)
-
-from .utils import compression
+from ..utils import compression
 
 # list of requirements that request a clone of all revlog if added/removed
 RECLONES_REQUIREMENTS = {
@@ -333,7 +326,7 @@ class generaldelta(requirementformatvari
 
 
 @registerformatvariant
-class sharedsafe(requirementformatvariant):
+class sharesafe(requirementformatvariant):
     name = b'exp-sharesafe'
     _requirement = requirements.SHARESAFE_REQUIREMENT
 
@@ -544,19 +537,6 @@ def finddeficiencies(repo):
     return deficiencies
 
 
-# search without '-' to support older form on newer client.
-#
-# We don't enforce backward compatibility for debug command so this
-# might eventually be dropped. However, having to use two different
-# forms in script when comparing result is anoying enough to add
-# backward compatibility for a while.
-legacy_opts_map = {
-    b'redeltaparent': b're-delta-parent',
-    b'redeltamultibase': b're-delta-multibase',
-    b'redeltaall': b're-delta-all',
-    b'redeltafulladd': b're-delta-fulladd',
-}
-
 ALL_OPTIMISATIONS = []
 
 
@@ -684,329 +664,3 @@ def determineactions(repo, deficiencies,
     # e.g. adding generaldelta could schedule parent redeltas.
 
     return newactions
-
-
-def upgraderepo(
-    ui,
-    repo,
-    run=False,
-    optimize=None,
-    backup=True,
-    manifest=None,
-    changelog=None,
-    filelogs=None,
-):
-    """Upgrade a repository in place."""
-    if optimize is None:
-        optimize = []
-    optimize = {legacy_opts_map.get(o, o) for o in optimize}
-    repo = repo.unfiltered()
-
-    revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
-    specentries = (
-        (upgrade_engine.UPGRADE_CHANGELOG, changelog),
-        (upgrade_engine.UPGRADE_MANIFEST, manifest),
-        (upgrade_engine.UPGRADE_FILELOGS, filelogs),
-    )
-    specified = [(y, x) for (y, x) in specentries if x is not None]
-    if specified:
-        # we have some limitation on revlogs to be recloned
-        if any(x for y, x in specified):
-            revlogs = set()
-            for upgrade, enabled in specified:
-                if enabled:
-                    revlogs.add(upgrade)
-        else:
-            # none are enabled
-            for upgrade, __ in specified:
-                revlogs.discard(upgrade)
-
-    # Ensure the repository can be upgraded.
-    missingreqs = requiredsourcerequirements(repo) - repo.requirements
-    if missingreqs:
-        raise error.Abort(
-            _(b'cannot upgrade repository; requirement missing: %s')
-            % _(b', ').join(sorted(missingreqs))
-        )
-
-    blockedreqs = blocksourcerequirements(repo) & repo.requirements
-    if blockedreqs:
-        raise error.Abort(
-            _(
-                b'cannot upgrade repository; unsupported source '
-                b'requirement: %s'
-            )
-            % _(b', ').join(sorted(blockedreqs))
-        )
-
-    # FUTURE there is potentially a need to control the wanted requirements via
-    # command arguments or via an extension hook point.
-    newreqs = localrepo.newreporequirements(
-        repo.ui, localrepo.defaultcreateopts(repo.ui)
-    )
-    newreqs.update(preservedrequirements(repo))
-
-    noremovereqs = (
-        repo.requirements - newreqs - supportremovedrequirements(repo)
-    )
-    if noremovereqs:
-        raise error.Abort(
-            _(
-                b'cannot upgrade repository; requirement would be '
-                b'removed: %s'
-            )
-            % _(b', ').join(sorted(noremovereqs))
-        )
-
-    noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
-    if noaddreqs:
-        raise error.Abort(
-            _(
-                b'cannot upgrade repository; do not support adding '
-                b'requirement: %s'
-            )
-            % _(b', ').join(sorted(noaddreqs))
-        )
-
-    unsupportedreqs = newreqs - supporteddestrequirements(repo)
-    if unsupportedreqs:
-        raise error.Abort(
-            _(
-                b'cannot upgrade repository; do not support '
-                b'destination requirement: %s'
-            )
-            % _(b', ').join(sorted(unsupportedreqs))
-        )
-
-    # Find and validate all improvements that can be made.
-    alloptimizations = findoptimizations(repo)
-
-    # Apply and Validate arguments.
-    optimizations = []
-    for o in alloptimizations:
-        if o.name in optimize:
-            optimizations.append(o)
-            optimize.discard(o.name)
-
-    if optimize:  # anything left is unknown
-        raise error.Abort(
-            _(b'unknown optimization action requested: %s')
-            % b', '.join(sorted(optimize)),
-            hint=_(b'run without arguments to see valid optimizations'),
-        )
-
-    deficiencies = finddeficiencies(repo)
-    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
-    actions.extend(
-        o
-        for o in sorted(optimizations)
-        # determineactions could have added optimisation
-        if o not in actions
-    )
-
-    removedreqs = repo.requirements - newreqs
-    addedreqs = newreqs - repo.requirements
-
-    if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
-        incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
-        if incompatible:
-            msg = _(
-                b'ignoring revlogs selection flags, format requirements '
-                b'change: %s\n'
-            )
-            ui.warn(msg % b', '.join(sorted(incompatible)))
-            revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
-
-    def write_labeled(l, label):
-        first = True
-        for r in sorted(l):
-            if not first:
-                ui.write(b', ')
-            ui.write(r, label=label)
-            first = False
-
-    def printrequirements():
-        ui.write(_(b'requirements\n'))
-        ui.write(_(b'   preserved: '))
-        write_labeled(
-            newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
-        )
-        ui.write((b'\n'))
-        removed = repo.requirements - newreqs
-        if repo.requirements - newreqs:
-            ui.write(_(b'   removed: '))
-            write_labeled(removed, "upgrade-repo.requirement.removed")
-            ui.write((b'\n'))
-        added = newreqs - repo.requirements
-        if added:
-            ui.write(_(b'   added: '))
-            write_labeled(added, "upgrade-repo.requirement.added")
-            ui.write((b'\n'))
-        ui.write(b'\n')
-
-    def printoptimisations():
-        optimisations = [a for a in actions if a.type == OPTIMISATION]
-        optimisations.sort(key=lambda a: a.name)
-        if optimisations:
-            ui.write(_(b'optimisations: '))
-            write_labeled(
-                [a.name for a in optimisations],
-                "upgrade-repo.optimisation.performed",
-            )
-            ui.write(b'\n\n')
-
-    def printupgradeactions():
-        for a in actions:
-            ui.status(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))
-
-    def print_affected_revlogs():
-        if not revlogs:
-            ui.write((b'no revlogs to process\n'))
-        else:
-            ui.write((b'processed revlogs:\n'))
-            for r in sorted(revlogs):
-                ui.write((b'  - %s\n' % r))
-        ui.write((b'\n'))
-
-    if not run:
-        fromconfig = []
-        onlydefault = []
-
-        for d in deficiencies:
-            if d.fromconfig(repo):
-                fromconfig.append(d)
-            elif d.default:
-                onlydefault.append(d)
-
-        if fromconfig or onlydefault:
-
-            if fromconfig:
-                ui.status(
-                    _(
-                        b'repository lacks features recommended by '
-                        b'current config options:\n\n'
-                    )
-                )
-                for i in fromconfig:
-                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))
-
-            if onlydefault:
-                ui.status(
-                    _(
-                        b'repository lacks features used by the default '
-                        b'config options:\n\n'
-                    )
-                )
-                for i in onlydefault:
-                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))
-
-            ui.status(b'\n')
-        else:
-            ui.status(
-                _(
-                    b'(no feature deficiencies found in existing '
-                    b'repository)\n'
-                )
-            )
-
-        ui.status(
-            _(
-                b'performing an upgrade with "--run" will make the following '
-                b'changes:\n\n'
-            )
-        )
-
-        printrequirements()
-        printoptimisations()
-        printupgradeactions()
-        print_affected_revlogs()
-
-        unusedoptimize = [i for i in alloptimizations if i not in actions]
-
-        if unusedoptimize:
-            ui.status(
-                _(
-                    b'additional optimizations are available by specifying '
-                    b'"--optimize <name>":\n\n'
-                )
-            )
-            for i in unusedoptimize:
-                ui.status(_(b'%s\n   %s\n\n') % (i.name, i.description))
-        return
-
-    # Else we're in the run=true case.
-    ui.write(_(b'upgrade will perform the following actions:\n\n'))
-    printrequirements()
-    printoptimisations()
-    printupgradeactions()
-    print_affected_revlogs()
-
-    upgradeactions = [a.name for a in actions]
-
-    ui.status(_(b'beginning upgrade...\n'))
-    with repo.wlock(), repo.lock():
-        ui.status(_(b'repository locked and read-only\n'))
-        # Our strategy for upgrading the repository is to create a new,
-        # temporary repository, write data to it, then do a swap of the
-        # data. There are less heavyweight ways to do this, but it is easier
-        # to create a new repo object than to instantiate all the components
-        # (like the store) separately.
-        tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
-        backuppath = None
-        try:
-            ui.status(
-                _(
-                    b'creating temporary repository to stage migrated '
-                    b'data: %s\n'
-                )
-                % tmppath
-            )
-
-            # clone ui without using ui.copy because repo.ui is protected
-            repoui = repo.ui.__class__(repo.ui)
-            dstrepo = hg.repository(repoui, path=tmppath, create=True)
-
-            with dstrepo.wlock(), dstrepo.lock():
-                backuppath = upgrade_engine.upgrade(
-                    ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
-                )
-            if not (backup or backuppath is None):
-                ui.status(
-                    _(b'removing old repository content%s\n') % backuppath
-                )
-                repo.vfs.rmtree(backuppath, forcibly=True)
-                backuppath = None
-
-        finally:
-            ui.status(_(b'removing temporary repository %s\n') % tmppath)
-            repo.vfs.rmtree(tmppath, forcibly=True)
-
-        if backuppath and not ui.quiet:
-            ui.warn(
-                _(b'copy of old repository backed up at %s\n') % backuppath
-            )
-            ui.warn(
-                _(
-                    b'the old repository will not be deleted; remove '
-                    b'it to free up disk space once the upgraded '
-                    b'repository is verified\n'
-                )
-            )
-
-        if sharedsafe.name in addedreqs:
-            ui.warn(
-                _(
-                    b'repository upgraded to share safe mode, existing'
-                    b' shares will still work in old non-safe mode. '
-                    b'Re-share existing shares to use them in safe mode'
-                    b' New shares will be created in safe mode.\n'
-                )
-            )
-        if sharedsafe.name in removedreqs:
-            ui.warn(
-                _(
-                    b'repository downgraded to not use share safe mode, '
-                    b'existing shares will not work and needs to'
-                    b' be reshared.\n'
-                )
-            )