##// END OF EJS Templates
engine: prevent multiple checking of re-delta-multibase...
Pulkit Goyal -
r46835:3f92a9bb default
parent child Browse files
Show More
@@ -1,899 +1,904 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from ..i18n import _
11 11 from .. import (
12 12 error,
13 13 localrepo,
14 14 requirements,
15 15 revlog,
16 16 util,
17 17 )
18 18
19 19 from ..utils import compression
20 20
# list of requirements that request a clone of all revlog if added/removed;
# these change how deltas are chosen/stored, so toggling them means every
# revlog has to be rewritten from scratch
RECLONES_REQUIREMENTS = {
    b'generaldelta',
    requirements.SPARSEREVLOG_REQUIREMENT,
}
26 26
27 27
def preservedrequirements(repo):
    """Return requirements that must be carried over unchanged by an upgrade.

    Currently no requirement is unconditionally preserved, so this is empty.
    """
    return set()
30 30
31 31
# the two possible values for ``improvement.type``: a format variant is a
# storage-format "deficiency" the repository may lack, an optimisation is an
# optional extra action
FORMAT_VARIANT = b'deficiency'
OPTIMISATION = b'optimization'
34 34
35 35
class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
        Machine-readable string uniquely identifying this improvement. It
        will be mapped to an action later in the upgrade process.

    type
        Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
        A format variant is where we change the storage format. Not all format
        variant changes are an obvious problem.
        An optimization is an action (sometimes optional) that
        can be taken to further improve the state of the repository.

    description
        Message intended for humans explaining the improvement in more detail,
        including the implications of it. For ``FORMAT_VARIANT`` types, should
        be worded in the present tense. For ``OPTIMISATION`` types, should be
        worded in the future tense.

    upgrademessage
        Message intended for humans explaining what an upgrade addressing this
        issue will do. Should be worded in the future tense.

    postupgrademessage
        Message intended for humans which will be shown post an upgrade
        operation when the improvement will be added

    postdowngrademessage
        Message intended for humans which will be shown post an upgrade
        operation in which this improvement was removed
    """

    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage
        # post-operation notices are optional; callers/subclasses fill them in
        self.postupgrademessage = None
        self.postdowngrademessage = None

    def __eq__(self, other):
        # equality (and hashing, below) is defined solely by ``name``
        if isinstance(other, improvement):
            return self.name == other.name
        # this is what Python tells us to do for foreign types
        return NotImplemented

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # must stay consistent with __eq__, which only compares ``name``
        return hash(self.name)
90 90
91 91
# registry of every format-variant class, in registration order
allformatvariant = []


def registerformatvariant(cls):
    """Class decorator recording ``cls`` in ``allformatvariant``."""
    allformatvariant.append(cls)
    return cls
98 98
99 99
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""

    type = FORMAT_VARIANT
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it. For ``FORMAT_VARIANT`` types, it
    # should be worded in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    # Message intended for humans which will be shown post an upgrade
    # operation when the improvement will be added
    postupgrademessage = None

    # Message intended for humans which will be shown post an upgrade
    # operation in which this improvement was removed
    postdowngrademessage = None

    def __init__(self):
        # format variants are used as classes, never instantiated
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
143 143
144 144
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that controls this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # requirements a brand-new repository would get with the current config
        createopts = localrepo.defaultcreateopts(ui)
        return localrepo.newreporequirements(ui, createopts)

    @classmethod
    def fromrepo(cls, repo):
        """True when the controlling requirement is present in ``repo``."""
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        """True when a freshly-created repository would get the requirement."""
        assert cls._requirement is not None
        newreqs = cls._newreporequirements(repo.ui)
        return cls._requirement in newreqs
170 170
171 171
@registerformatvariant
class fncache(requirementformatvariant):
    # fncache lists tracked store files, working around filesystem limits on
    # long/reserved names
    name = b'fncache'

    _requirement = b'fncache'

    # enabled by default for new repositories
    default = True

    description = _(
        b'long and reserved filenames may not work correctly; '
        b'repository performance is sub-optimal'
    )

    upgrademessage = _(
        b'repository will be more resilient to storing '
        b'certain paths and performance of certain '
        b'operations should be improved'
    )
190 190
191 191
@registerformatvariant
class dotencode(requirementformatvariant):
    # dotencode escapes leading '.'/' ' in store filenames for portability
    name = b'dotencode'

    _requirement = b'dotencode'

    # enabled by default for new repositories
    default = True

    description = _(
        b'storage of filenames beginning with a period or '
        b'space may not work correctly'
    )

    upgrademessage = _(
        b'repository will be better able to store files '
        b'beginning with a space or period'
    )
209 209
210 210
@registerformatvariant
class generaldelta(requirementformatvariant):
    # generaldelta lets a revlog delta against any revision, not just the
    # previous one on disk
    name = b'generaldelta'

    _requirement = b'generaldelta'

    # enabled by default for new repositories
    default = True

    description = _(
        b'deltas within internal storage are unable to '
        b'choose optimal revisions; repository is larger and '
        b'slower than it could be; interaction with other '
        b'repositories may require extra network and CPU '
        b'resources, making "hg push" and "hg pull" slower'
    )

    upgrademessage = _(
        b'repository storage will be able to create '
        b'optimal deltas; new repository data will be '
        b'smaller and read times should decrease; '
        b'interacting with other repositories using this '
        b'storage model should require less network and '
        b'CPU resources, making "hg push" and "hg pull" '
        b'faster'
    )
236 236
237 237
@registerformatvariant
class sharesafe(requirementformatvariant):
    # still experimental, hence the 'exp-' prefix in the user-visible name
    name = b'exp-sharesafe'
    _requirement = requirements.SHARESAFE_REQUIREMENT

    # not yet enabled by default for new repositories
    default = False

    description = _(
        b'old shared repositories do not share source repository '
        b'requirements and config. This leads to various problems '
        b'when the source repository format is upgraded or some new '
        b'extensions are enabled.'
    )

    upgrademessage = _(
        b'Upgrades a repository to share-safe format so that future '
        b'shares of this repository share its requirements and configs.'
    )

    # shown after a downgrade that removes this requirement
    postdowngrademessage = _(
        b'repository downgraded to not use share safe mode, '
        b'existing shares will not work and needs to'
        b' be reshared.'
    )

    # shown after an upgrade that adds this requirement
    postupgrademessage = _(
        b'repository upgraded to share safe mode, existing'
        b' shares will still work in old non-safe mode. '
        b'Re-share existing shares to use them in safe mode'
        b' New shares will be created in safe mode.'
    )
269 269
270 270
@registerformatvariant
class sparserevlog(requirementformatvariant):
    # sparse-revlog allows delta chains with unused "gap" data skipped at read
    # time
    name = b'sparserevlog'

    _requirement = requirements.SPARSEREVLOG_REQUIREMENT

    # enabled by default for new repositories
    default = True

    description = _(
        b'in order to limit disk reading and memory usage on older '
        b'version, the span of a delta chain from its root to its '
        b'end is limited, whatever the relevant data in this span. '
        b'This can severly limit Mercurial ability to build good '
        b'chain of delta resulting is much more storage space being '
        b'taken and limit reusability of on disk delta during '
        b'exchange.'
    )

    upgrademessage = _(
        b'Revlog supports delta chain with more unused data '
        b'between payload. These gaps will be skipped at read '
        b'time. This allows for better delta chains, making a '
        b'better compression and faster exchange with server.'
    )
295 295
296 296
@registerformatvariant
class sidedata(requirementformatvariant):
    # side-data storage attaches extra blobs to revisions
    name = b'sidedata'

    _requirement = requirements.SIDEDATA_REQUIREMENT

    # not enabled by default for new repositories
    default = False

    description = _(
        b'Allows storage of extra data alongside a revision, '
        b'unlocking various caching options.'
    )

    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
311 311
312 312
@registerformatvariant
class persistentnodemap(requirementformatvariant):
    # persistent nodemap caches the node -> rev mapping on disk
    name = b'persistent-nodemap'

    _requirement = requirements.NODEMAP_REQUIREMENT

    # not enabled by default for new repositories
    default = False

    description = _(
        b'persist the node -> rev mapping on disk to speedup lookup'
    )

    upgrademessage = _(b'Speedup revision lookup by node id.')
326 326
327 327
@registerformatvariant
class copiessdc(requirementformatvariant):
    # "sdc" = side-data copies: copy metadata stored via the side-data channel
    name = b'copies-sdc'

    _requirement = requirements.COPIESSDC_REQUIREMENT

    # not enabled by default for new repositories
    default = False

    description = _(b'Stores copies information alongside changesets.')

    upgrademessage = _(
        b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
    )
341 341
342 342
@registerformatvariant
class removecldeltachain(formatvariant):
    name = b'plain-cl-delta'

    default = True

    description = _(
        b'changelog storage is using deltas instead of '
        b'raw entries; changelog reading and any '
        b'operation relying on changelog data are slower '
        b'than they could be'
    )

    upgrademessage = _(
        b'changelog storage will be reformated to '
        b'store raw entries; changelog reading will be '
        b'faster; changelog size may be reduced'
    )

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. The
        # variant holds only when no changelog revision deltas against
        # another one, i.e. every revision is its own chain base.
        cl = repo.changelog
        chainbase = cl.chainbase
        for rev in cl:
            if chainbase(rev) != rev:
                return False
        return True

    @staticmethod
    def fromconfig(repo):
        # new repositories always store plain changelog entries
        return True
373 373
374 374
@registerformatvariant
class compressionengine(formatvariant):
    name = b'compression'
    default = b'zlib'

    description = _(
        b'Compresion algorithm used to compress data. '
        b'Some engine are faster than other'
    )

    upgrademessage = _(
        b'revlog content will be recompressed with the new algorithm.'
    )

    @classmethod
    def fromrepo(cls, repo):
        # we allow multiple compression engine requirement to co-exist because
        # strictly speaking, revlog seems to support mixed compression style.
        #
        # The compression used for new entries will be "the last one"
        compression = b'zlib'
        for req in repo.requirements:
            if req.startswith(b'revlog-compression-') or req.startswith(
                b'exp-compression-'
            ):
                compression = req.split(b'-', 2)[2]
        return compression

    @classmethod
    def fromconfig(cls, repo):
        compengines = repo.ui.configlist(b'format', b'revlog-compression')
        # return the first valid value as the selection code would do
        for comp in compengines:
            if comp in util.compengines:
                return comp
        # no valid compression found, display them all for clarity
        return b','.join(compengines)
412 412
413 413
@registerformatvariant
class compressionlevel(formatvariant):
    """Report the compression level used when (re)compressing revlog data."""

    name = b'compression-level'
    default = b'default'

    description = _(b'compression level')

    upgrademessage = _(b'revlog content will be recompressed')

    @classmethod
    def _levelfor(cls, comp, ui):
        """Return the configured level for engine ``comp`` as bytes.

        Returns ``b'default'`` when no explicit level is configured or the
        engine takes no level. Factors the logic previously duplicated in
        ``fromrepo``/``fromconfig``.
        """
        level = None
        if comp == b'zlib':
            level = ui.configint(b'storage', b'revlog.zlib.level')
        elif comp == b'zstd':
            level = ui.configint(b'storage', b'revlog.zstd.level')
        if level is None:
            return b'default'
        # BUG FIX: ``bytes(level)`` on Python 3 produces a zero-filled buffer
        # of ``level`` NUL bytes, not the decimal representation; use
        # %d-formatting (PEP 461), which is correct on both Python 2 and 3.
        return b'%d' % level

    @classmethod
    def fromrepo(cls, repo):
        """level currently in effect, based on the repo's compression engine"""
        return cls._levelfor(compressionengine.fromrepo(repo), repo.ui)

    @classmethod
    def fromconfig(cls, repo):
        """level a new repository would get from the current configuration"""
        return cls._levelfor(compressionengine.fromconfig(repo), repo.ui)
446 446
447 447
def find_format_upgrades(repo):
    """returns a list of format upgrades which can be perform on the repo"""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.
    #
    # every registered variant that the repository does not already have is a
    # candidate upgrade
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
461 461
462 462
def find_format_downgrades(repo):
    """returns a list of format downgrades which will be performed on the repo
    because of disabled config option for them"""
    # a variant present in the repo but absent from the requirements a new
    # repository would get (i.e. disabled by config) is about to be dropped
    return [
        fv
        for fv in allformatvariant
        if fv.fromrepo(repo) and not fv.fromconfig(repo)
    ]
476 476
477 477
# registry of every optimization improvement, in registration order
ALL_OPTIMISATIONS = []


def register_optimization(obj):
    """Record ``obj`` in ``ALL_OPTIMISATIONS`` and return it unchanged."""
    ALL_OPTIMISATIONS.append(obj)
    return obj
484 484
485 485
# The four delta-recomputation optimizations, registered roughly from cheapest
# to most expensive.
register_optimization(
    improvement(
        name=b're-delta-parent',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated to '
            b'choose an optimal base revision where this was not '
            b'already done; the size of the repository may shrink and '
            b'various operations may become faster; the first time '
            b'this optimization is performed could slow down upgrade '
            b'execution considerably; subsequent invocations should '
            b'not run noticeably slower'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose a new '
            b'base revision if needed'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-multibase',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated '
            b'against multiple base revision and the smallest '
            b'difference will be used; the size of the repository may '
            b'shrink significantly when there are many merges; this '
            b'optimization will slow down execution in proportion to '
            b'the number of merges in the repository and the amount '
            b'of files in the repository; this slow down should not '
            b'be significant unless there are tens of thousands of '
            b'files and thousands of merges'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose an '
            b'optimal delta by computing deltas against multiple '
            b'parents; may slow down execution time '
            b'significantly'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-all',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will always be '
            b'recalculated without reusing prior deltas; this will '
            b'likely make execution run several times slower; this '
            b'optimization is typically not needed'
        ),
        upgrademessage=_(
            b'deltas within internal storage will be fully '
            b'recomputed; this will likely drastically slow down '
            b'execution time'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-fulladd',
        type=OPTIMISATION,
        description=_(
            b'every revision will be re-added as if it was new '
            b'content. It will go through the full storage '
            b'mechanism giving extensions a chance to process it '
            b'(eg. lfs). This is similar to "re-delta-all" but even '
            b'slower since more logic is involved.'
        ),
        upgrademessage=_(
            b'each revision will be added as new content to the '
            b'internal storage; this will likely drastically slow '
            b'down execution time, but some extensions might need '
            b'it'
        ),
    )
)
567 567
568 568
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade"""
    # These are unconditionally added. There is logic later that figures out
    # which ones to apply. Return a copy so callers cannot mutate the
    # registry.
    return ALL_OPTIMISATIONS[:]
574 574
575 575
def determine_upgrade_actions(
    repo, format_upgrades, optimizations, sourcereqs, destreqs
):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``find_format_upgrades`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    newactions = []

    for upgrade in format_upgrades:
        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        req = upgrade._requirement
        if req is not None and req not in destreqs:
            continue
        newactions.append(upgrade)

    # append optimizations in sorted order, skipping any already present
    # (membership is re-checked as the list grows, matching lazy extend)
    for optim in sorted(optimizations):
        if optim not in newactions:
            newactions.append(optim)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
609 609
610 610
class UpgradeOperation(object):
    """represent the work to be done during an upgrade"""

    def __init__(
        self,
        ui,
        new_requirements,
        current_requirements,
        upgrade_actions,
        removed_actions,
        revlogs_to_process,
    ):
        self.ui = ui
        self.new_requirements = new_requirements
        self.current_requirements = current_requirements
        # list of upgrade actions the operation will perform
        self.upgrade_actions = upgrade_actions
        # names are what the rest of the code keys on
        self._upgrade_actions_names = set([a.name for a in upgrade_actions])
        self.removed_actions = removed_actions
        self.revlogs_to_process = revlogs_to_process
        # requirements which will be added by the operation
        self._added_requirements = (
            self.new_requirements - self.current_requirements
        )
        # requirements which will be removed by the operation
        self._removed_requirements = (
            self.current_requirements - self.new_requirements
        )
        # requirements which will be preserved by the operation
        self._preserved_requirements = (
            self.current_requirements & self.new_requirements
        )
        # optimizations which are not used and it's recommended that they
        # should use them
        all_optimizations = findoptimizations(None)
        self.unused_optimizations = [
            i for i in all_optimizations if i not in self.upgrade_actions
        ]

        # delta reuse mode of this upgrade operation; default is to reuse
        # every existing delta, tightened below by the selected optimization
        self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
        if b're-delta-all' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
        elif b're-delta-parent' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-multibase' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-fulladd' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD

        # should this operation force re-delta of both parents
        self.force_re_delta_both_parents = (
            b're-delta-multibase' in self._upgrade_actions_names
        )

    def _write_labeled(self, l, label):
        """
        Utility function to aid writing of a list under one label
        """
        first = True
        for r in sorted(l):
            if not first:
                self.ui.write(b', ')
            self.ui.write(r, label=label)
            first = False

    def print_requirements(self):
        """print preserved/removed/added requirements of this operation"""
        self.ui.write(_(b'requirements\n'))
        self.ui.write(_(b' preserved: '))
        self._write_labeled(
            self._preserved_requirements, "upgrade-repo.requirement.preserved"
        )
        self.ui.write((b'\n'))
        if self._removed_requirements:
            self.ui.write(_(b' removed: '))
            self._write_labeled(
                self._removed_requirements, "upgrade-repo.requirement.removed"
            )
            self.ui.write((b'\n'))
        if self._added_requirements:
            self.ui.write(_(b' added: '))
            self._write_labeled(
                self._added_requirements, "upgrade-repo.requirement.added"
            )
            self.ui.write((b'\n'))
        self.ui.write(b'\n')

    def print_optimisations(self):
        """print the optimisation actions this operation will perform"""
        optimisations = [
            a for a in self.upgrade_actions if a.type == OPTIMISATION
        ]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            self.ui.write(_(b'optimisations: '))
            self._write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            self.ui.write(b'\n\n')

    def print_upgrade_actions(self):
        """print each planned action with its human-readable message"""
        for a in self.upgrade_actions:
            self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))

    def print_affected_revlogs(self):
        """print which revlogs will be touched by this operation"""
        if not self.revlogs_to_process:
            self.ui.write((b'no revlogs to process\n'))
        else:
            self.ui.write((b'processed revlogs:\n'))
            for r in sorted(self.revlogs_to_process):
                self.ui.write((b' - %s\n' % r))
        self.ui.write((b'\n'))

    def print_unused_optimizations(self):
        """print registered optimizations this operation will NOT run"""
        for i in self.unused_optimizations:
            self.ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))

    def has_upgrade_action(self, name):
        """ Check whether the upgrade operation will perform this action """
        return name in self._upgrade_actions_names

    def print_post_op_messages(self):
        """ print post upgrade operation warning messages """
        for a in self.upgrade_actions:
            if a.postupgrademessage is not None:
                self.ui.warn(b'%s\n' % a.postupgrademessage)
        for a in self.removed_actions:
            if a.postdowngrademessage is not None:
                self.ui.warn(b'%s\n' % a.postdowngrademessage)
735 740
736 741
737 742 ### Code checking if a repository can got through the upgrade process at all. #
738 743
739 744
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # both were introduced in Mercurial 0.9.2; older repos cannot be upgraded
    return {b'revlogv1', b'store'}
752 757
753 758
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # This was a precursor to generaldelta and was never enabled by
        # default. It should (hopefully) not exist in the wild.
        b'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        requirements.SHARED_REQUIREMENT,
    }
    return blockers
770 775
771 776
def check_source_requirements(repo):
    """Ensure that no existing requirements prevent the repository upgrade"""

    # abort when a mandatory requirement is absent...
    missing = requiredsourcerequirements(repo) - repo.requirements
    if missing:
        msg = _(b'cannot upgrade repository; requirement missing: %s')
        raise error.Abort(msg % b', '.join(sorted(missing)))

    # ...or when a blocking one is present
    blocking = blocksourcerequirements(repo) & repo.requirements
    if blocking:
        m = _(b'cannot upgrade repository; unsupported source requirement: %s')
        raise error.Abort(m % b', '.join(sorted(blocking)))
788 793
789 794
790 795 ### Verify the validity of the planned requirement changes ####################
791 796
792 797
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # compression requirements can be dropped when the matching engine works
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not engine.available() or not engine.revlogheader():
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
814 819
815 820
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        b'revlogv1',
        b'store',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # every usable compression engine is an acceptable destination requirement
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not engine.available() or not engine.revlogheader():
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
843 848
844 849
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # plus any compression engine that is actually usable here
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not engine.available() or not engine.revlogheader():
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
872 877
873 878
def check_requirements_changes(repo, new_reqs):
    """Validate the requirement delta implied by the upgrade.

    Aborts when the upgrade would remove, add, or end up with a requirement
    that the upgrade code cannot handle.
    """
    old_reqs = repo.requirements

    removed = old_reqs - new_reqs - supportremovedrequirements(repo)
    if removed:
        msg = _(b'cannot upgrade repository; requirement would be removed: %s')
        raise error.Abort(msg % b', '.join(sorted(removed)))

    added = new_reqs - old_reqs - allowednewrequirements(repo)
    if added:
        m = _(b'cannot upgrade repository; do not support adding requirement: ')
        raise error.Abort(m + b', '.join(sorted(added)))

    unsupported = new_reqs - supporteddestrequirements(repo)
    if unsupported:
        msg = _(
            b'cannot upgrade repository; do not support destination '
            b'requirement: %s'
        )
        raise error.Abort(msg % b', '.join(sorted(unsupported)))
@@ -1,522 +1,520 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from ..i18n import _
13 13 from ..pycompat import getattr
14 14 from .. import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 manifest,
19 19 metadata,
20 20 pycompat,
21 21 requirements,
22 22 revlog,
23 23 scmutil,
24 24 util,
25 25 vfs as vfsmod,
26 26 )
27 27
28 28
def _revlogfrompath(repo, path):
    """Instantiate the revlog object stored at ``path`` inside ``repo``.

    An instance of the appropriate class (changelog, manifest revlog or
    filelog) is returned, depending on the store path given.
    """
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith(b'00manifest.i'):
        # Strip the trailing "00manifest.i" to recover the tree directory.
        tree = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # Filelog store paths look like "data/<file>.i"; undo that encoding
    # (reverse of "/".join(("data", path + ".i"))).
    return filelog.filelog(repo.svfs, path[5:-2])
42 42
43 43
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # Unwrap wrapper objects (e.g. filelog) down to the raw revlog.
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    oldvfs = oldrl.opener
    newvfs = newrl.opener
    oldindex = oldvfs.join(oldrl.indexfile)
    newindex = newvfs.join(newrl.indexfile)
    olddata = oldvfs.join(oldrl.datafile)
    newdata = newvfs.join(newrl.datafile)

    with newvfs(newrl.indexfile, b'w'):
        pass  # create all the directories

    util.copyfile(oldindex, newindex)
    # The ".d" data file may be absent (presumably the revlog data is inlined
    # in the index — TODO confirm); only copy it when it exists.
    copydata = oldrl.opener.exists(oldrl.datafile)
    if copydata:
        util.copyfile(olddata, newdata)

    if not (
        unencodedname.endswith(b'00changelog.i')
        or unencodedname.endswith(b'00manifest.i')
    ):
        # Register the copied file(s) with the destination fncache so the
        # store can locate them later; changelog/manifest are not tracked
        # by the fncache.
        destrepo.svfs.fncache.add(unencodedname)
        if copydata:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
77 77
78 78
# Names used to select which categories of revlogs an upgrade operation
# should process (see matchrevlog() below).
UPGRADE_CHANGELOG = b"changelog"
UPGRADE_MANIFEST = b"manifest"
UPGRADE_FILELOGS = b"all-filelogs"

# Filter value meaning "process every revlog category".
UPGRADE_ALL_REVLOGS = frozenset(
    [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
)
86 86
87 87
def getsidedatacompanion(srcrepo, dstrepo):
    """Build the sidedata companion callback for a clone, if one is needed.

    Returns ``None`` when the requirement delta between ``srcrepo`` and
    ``dstrepo`` implies no sidedata transformation, otherwise a callable
    passed to ``revlog.clone()`` and invoked per revision.
    """
    sidedatacompanion = None
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements
    if requirements.SIDEDATA_REQUIREMENT in removedreqs:
        # Destination drops sidedata support entirely: signal removal for
        # every revision that carries the sidedata flag.
        def sidedatacompanion(rl, rev):
            rl = getattr(rl, '_revlog', rl)
            if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
                return True, (), {}, 0, 0
            return False, (), {}, 0, 0

    elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
        # Destination starts storing copy metadata in sidedata: add it.
        sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
    elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
        # Destination stops storing copy metadata in sidedata: remove it.
        sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
    return sidedatacompanion
105 105
106 106
def matchrevlog(revlogfilter, entry):
    """Check whether a revlog is selected for cloning.

    In other words: does this revlog need updates applied, or can it be
    blindly copied?

    The store entry is checked against the passed filter.
    """
    if entry.endswith(b'00changelog.i'):
        wanted = UPGRADE_CHANGELOG
    elif entry.endswith(b'00manifest.i'):
        wanted = UPGRADE_MANIFEST
    else:
        wanted = UPGRADE_FILELOGS
    return wanted in revlogfilter
119 119
120 120
def _perform_clone(
    ui,
    dstrepo,
    tr,
    old_revlog,
    unencoded,
    upgrade_op,
    sidedatacompanion,
    oncopiedrevision,
):
    """Clone or copy one revlog into ``dstrepo``; returns the new revlog object.

    When the revlog is selected by ``upgrade_op.revlogs_to_process`` it is
    cloned revision by revision (allowing deltas to be recomputed); otherwise
    its files are blindly copied as-is.
    """
    newrl = None
    if matchrevlog(upgrade_op.revlogs_to_process, unencoded):
        ui.note(
            _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
        )
        newrl = _revlogfrompath(dstrepo, unencoded)
        old_revlog.clone(
            tr,
            newrl,
            addrevisioncb=oncopiedrevision,
            deltareuse=upgrade_op.delta_reuse_mode,
            # Precomputed once on the operation object so the
            # b're-delta-multibase' action is not re-checked per revlog.
            forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
            sidedatacompanion=sidedatacompanion,
        )
    else:
        msg = _(b'blindly copying %s containing %i revisions\n')
        ui.note(msg % (unencoded, len(old_revlog)))
        _copyrevlog(tr, dstrepo, old_revlog, unencoded)

        newrl = _revlogfrompath(dstrepo, unencoded)
    return newrl
155 153
156 154
def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    upgrade_op,
):
    """Copy revlogs between 2 repos.

    Performs a metadata pass over every revlog in the source store, then
    migrates filelogs, manifests and the changelog (in that order) into
    ``dstrepo``, reporting progress and size deltas along the way.
    """
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())
    # mapping of data files which needs to be cloned
    # key is unencoded filename
    # value is revlog_object_from_srcrepo
    manifests = {}
    changelogs = {}
    filelogs = {}

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            changelogs[unencoded] = rl
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            manifests[unencoded] = rl
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            filelogs[unencoded] = rl
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # Fix: the exception was previously constructed but never
            # raised, silently ignoring unknown revlog types.
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Migrating filelogs
    ui.status(
        _(
            b'migrating %d filelogs containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            fcount,
            frevcount,
            util.bytecount(fsrcsize),
            util.bytecount(frawsize),
        )
    )
    progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
    for unencoded, oldrl in sorted(filelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            upgrade_op,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        fdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d filelog revisions across %d '
            b'filelogs; change in size: %s\n'
        )
        % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
    )

    # Migrating manifests
    ui.status(
        _(
            b'migrating %d manifests containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            mcount,
            mrevcount,
            util.bytecount(msrcsize),
            util.bytecount(mrawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'manifest revisions'), total=mrevcount
    )
    for unencoded, oldrl in sorted(manifests.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            upgrade_op,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        mdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d manifest revisions across %d '
            b'manifests; change in size: %s\n'
        )
        % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
    )

    # Migrating changelog
    ui.status(
        _(
            b'migrating changelog containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            crevcount,
            util.bytecount(csrcsize),
            util.bytecount(crawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'changelog revisions'), total=crevcount
    )
    for unencoded, oldrl in sorted(changelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            upgrade_op,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        cdstsize += info[b'storedsize'] or 0
    progress.complete()
    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    dstsize = fdstsize + mdstsize + cdstsize
    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )
379 377
380 378
381 379 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
382 380 """Determine whether to copy a store file during upgrade.
383 381
384 382 This function is called when migrating store files from ``srcrepo`` to
385 383 ``dstrepo`` as part of upgrading a repository.
386 384
387 385 Args:
388 386 srcrepo: repo we are copying from
389 387 dstrepo: repo we are copying to
390 388 requirements: set of requirements for ``dstrepo``
391 389 path: store file being examined
392 390 mode: the ``ST_MODE`` file type of ``path``
393 391 st: ``stat`` data structure for ``path``
394 392
395 393 Function should return ``True`` if the file is to be copied.
396 394 """
397 395 # Skip revlogs.
398 396 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
399 397 return False
400 398 # Skip transaction related files.
401 399 if path.startswith(b'undo'):
402 400 return False
403 401 # Only copy regular files.
404 402 if mode != stat.S_IFREG:
405 403 return False
406 404 # Skip other skipped files.
407 405 if path in (b'lock', b'fncache'):
408 406 return False
409 407
410 408 return True
411 409
412 410
def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.

    The core implementation is intentionally a no-op.
    """
419 417
420 418
def upgrade(ui, srcrepo, dstrepo, upgrade_op):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Returns the path of the backup directory holding the replaced files.
    """
    # Both repos must already be write-locked by the caller.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

    with dstrepo.transaction(b'upgrade') as tr:
        _clonerevlogs(
            ui,
            srcrepo,
            dstrepo,
            tr,
            upgrade_op,
        )

        # Now copy other files in the store directory.
        # The sorted() makes execution deterministic.
        for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
            if not _filterstorefile(
                srcrepo, dstrepo, upgrade_op.new_requirements, p, kind, st
            ):
                continue

            srcrepo.ui.status(_(b'copying %s\n') % p)
            src = srcrepo.store.rawvfs.join(p)
            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

        # NOTE(review): this passes the `requirements` module, not a
        # requirements set — confirm the hook's expected argument.
        _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.status(_(b'data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.status(
        _(
            b'marking source repository as being upgraded; clients will be '
            b'unable to read from repository\n'
        )
    )
    scmutil.writereporequirements(
        srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
    )

    ui.status(_(b'starting in-place swap of repository data\n'))
    ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.status(_(b'replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join(b'store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.status(
        _(
            b'store replacement complete; repository was inconsistent for '
            b'%0.1fs\n'
        )
        % elapsed
    )

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.status(
        _(
            b'finalizing requirements file and making repository readable '
            b'again\n'
        )
    )
    scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink(b'store/lock')

    return backuppath
General Comments 0
You need to be logged in to leave comments. Login now