##// END OF EJS Templates
actions: store deltareuse mode of whole operation in UpgradeOperation...
Pulkit Goyal -
r46832:82f3ee1a default
parent child Browse files
Show More
@@ -1,887 +1,899 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from ..i18n import _
11 11 from .. import (
12 12 error,
13 13 localrepo,
14 14 requirements,
15 revlog,
15 16 util,
16 17 )
17 18
18 19 from ..utils import compression
19 20
# list of requirements that request a clone of all revlog if added/removed
# (adding or removing these cannot be done in place; the whole store must be
# rewritten)
RECLONES_REQUIREMENTS = {
    b'generaldelta',
    requirements.SPARSEREVLOG_REQUIREMENT,
}
25 26
26 27
def preservedrequirements(repo):
    """Return the set of requirements unconditionally kept during upgrade.

    Currently empty: no requirement is preserved outside the normal
    old/new requirement computation.
    """
    preserved = set()
    return preserved
29 30
30 31
# the two categories an ``improvement`` can belong to: a storage-format
# 'deficiency' that an upgrade addresses, or an optional 'optimization'
FORMAT_VARIANT = b'deficiency'
OPTIMISATION = b'optimization'
33 34
34 35
class improvement(object):
    """A single improvement that an upgrade operation can perform.

    Instances carry the following attributes:

    name
        Machine-readable string uniquely identifying this improvement. It
        will be mapped to an action later in the upgrade process.

    type
        Either ``FORMAT_VARIANT`` (a storage format change; not every format
        variant change is an obvious problem) or ``OPTIMISATION`` (a
        sometimes-optional action that further improves repository state).

    description
        Human-readable explanation of the improvement and its implications.
        Present tense for ``FORMAT_VARIANT`` types, future tense for
        ``OPTIMISATION`` types.

    upgrademessage
        Human-readable description of what an upgrade addressing this issue
        will do. Worded in the future tense.

    postupgrademessage
        Message shown after an upgrade operation that added this improvement.

    postdowngrademessage
        Message shown after an upgrade operation that removed this
        improvement.
    """

    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage
        # post-operation messages are optional and default to "nothing"
        self.postupgrademessage = None
        self.postdowngrademessage = None

    def __eq__(self, other):
        if isinstance(other, improvement):
            # improvements are identified by their name alone
            return self.name == other.name
        # let Python fall back to its default comparison machinery
        return NotImplemented

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # consistent with __eq__: hash on the identifying name
        return hash(self.name)
89 90
90 91
91 92 allformatvariant = []
92 93
93 94
def registerformatvariant(cls):
    """Class decorator recording ``cls`` in ``allformatvariant``.

    Returns the class unchanged so it can wrap a class statement.
    """
    allformatvariant.append(cls)
    return cls
97 98
98 99
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""

    # format variants are, by definition, format deficiencies an upgrade
    # can address
    type = FORMAT_VARIANT
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it ``FORMAT_VARIANT`` types, should be
    # worded
    # in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    # Message intended for humans which will be shown post an upgrade
    # operation when the improvement will be added
    postupgrademessage = None

    # Message intended for humans which will be shown post an upgrade
    # operation in which this improvement was removed
    postdowngrademessage = None

    def __init__(self):
        # subclasses act as namespaces of class attributes/methods and are
        # never instantiated
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
142 143
143 144
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that control this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # requirements a brand new repository would be created with
        createopts = localrepo.defaultcreateopts(ui)
        return localrepo.newreporequirements(ui, createopts)

    @classmethod
    def fromrepo(cls, repo):
        assert cls._requirement is not None
        # the variant is "on" iff its requirement is present in the repo
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        assert cls._requirement is not None
        # the variant is "on" iff a fresh repo would gain the requirement
        return cls._requirement in cls._newreporequirements(repo.ui)
169 170
170 171
@registerformatvariant
class fncache(requirementformatvariant):
    # format variant backed by the 'fncache' requirement
    name = b'fncache'

    _requirement = b'fncache'

    # enabled by default in new repositories
    default = True

    description = _(
        b'long and reserved filenames may not work correctly; '
        b'repository performance is sub-optimal'
    )

    upgrademessage = _(
        b'repository will be more resilient to storing '
        b'certain paths and performance of certain '
        b'operations should be improved'
    )
189 190
190 191
@registerformatvariant
class dotencode(requirementformatvariant):
    # format variant backed by the 'dotencode' requirement
    name = b'dotencode'

    _requirement = b'dotencode'

    # enabled by default in new repositories
    default = True

    description = _(
        b'storage of filenames beginning with a period or '
        b'space may not work correctly'
    )

    upgrademessage = _(
        b'repository will be better able to store files '
        b'beginning with a space or period'
    )
208 209
209 210
@registerformatvariant
class generaldelta(requirementformatvariant):
    # format variant backed by the 'generaldelta' requirement
    name = b'generaldelta'

    _requirement = b'generaldelta'

    # enabled by default in new repositories
    default = True

    description = _(
        b'deltas within internal storage are unable to '
        b'choose optimal revisions; repository is larger and '
        b'slower than it could be; interaction with other '
        b'repositories may require extra network and CPU '
        b'resources, making "hg push" and "hg pull" slower'
    )

    upgrademessage = _(
        b'repository storage will be able to create '
        b'optimal deltas; new repository data will be '
        b'smaller and read times should decrease; '
        b'interacting with other repositories using this '
        b'storage model should require less network and '
        b'CPU resources, making "hg push" and "hg pull" '
        b'faster'
    )
235 236
236 237
@registerformatvariant
class sharesafe(requirementformatvariant):
    # experimental share-safe format variant; defines both post-operation
    # messages because up/downgrading affects existing shares
    name = b'exp-sharesafe'
    _requirement = requirements.SHARESAFE_REQUIREMENT

    # not enabled by default in new repositories
    default = False

    description = _(
        b'old shared repositories do not share source repository '
        b'requirements and config. This leads to various problems '
        b'when the source repository format is upgraded or some new '
        b'extensions are enabled.'
    )

    upgrademessage = _(
        b'Upgrades a repository to share-safe format so that future '
        b'shares of this repository share its requirements and configs.'
    )

    postdowngrademessage = _(
        b'repository downgraded to not use share safe mode, '
        b'existing shares will not work and needs to'
        b' be reshared.'
    )

    postupgrademessage = _(
        b'repository upgraded to share safe mode, existing'
        b' shares will still work in old non-safe mode. '
        b'Re-share existing shares to use them in safe mode'
        b' New shares will be created in safe mode.'
    )
268 269
269 270
@registerformatvariant
class sparserevlog(requirementformatvariant):
    # format variant backed by the sparse-revlog requirement
    name = b'sparserevlog'

    _requirement = requirements.SPARSEREVLOG_REQUIREMENT

    # enabled by default in new repositories
    default = True

    description = _(
        b'in order to limit disk reading and memory usage on older '
        b'version, the span of a delta chain from its root to its '
        b'end is limited, whatever the relevant data in this span. '
        b'This can severly limit Mercurial ability to build good '
        b'chain of delta resulting is much more storage space being '
        b'taken and limit reusability of on disk delta during '
        b'exchange.'
    )

    upgrademessage = _(
        b'Revlog supports delta chain with more unused data '
        b'between payload. These gaps will be skipped at read '
        b'time. This allows for better delta chains, making a '
        b'better compression and faster exchange with server.'
    )
294 295
295 296
@registerformatvariant
class sidedata(requirementformatvariant):
    # format variant backed by the side-data requirement
    name = b'sidedata'

    _requirement = requirements.SIDEDATA_REQUIREMENT

    # not enabled by default in new repositories
    default = False

    description = _(
        b'Allows storage of extra data alongside a revision, '
        b'unlocking various caching options.'
    )

    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
310 311
311 312
@registerformatvariant
class persistentnodemap(requirementformatvariant):
    # format variant backed by the persistent nodemap requirement
    name = b'persistent-nodemap'

    _requirement = requirements.NODEMAP_REQUIREMENT

    # not enabled by default in new repositories
    default = False

    description = _(
        b'persist the node -> rev mapping on disk to speedup lookup'
    )

    upgrademessage = _(b'Speedup revision lookup by node id.')
325 326
326 327
@registerformatvariant
class copiessdc(requirementformatvariant):
    # format variant backed by the copies-side-data-changeset requirement
    name = b'copies-sdc'

    _requirement = requirements.COPIESSDC_REQUIREMENT

    # not enabled by default in new repositories
    default = False

    description = _(b'Stores copies information alongside changesets.')

    upgrademessage = _(
        b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
    )
340 341
341 342
@registerformatvariant
class removecldeltachain(formatvariant):
    name = b'plain-cl-delta'

    default = True

    description = _(
        b'changelog storage is using deltas instead of '
        b'raw entries; changelog reading and any '
        b'operation relying on changelog data are slower '
        b'than they could be'
    )

    upgrademessage = _(
        b'changelog storage will be reformated to '
        b'store raw entries; changelog reading will be '
        b'faster; changelog size may be reduced'
    )

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
        # changelogs with deltas.
        cl = repo.changelog
        for rev in cl:
            # an entry stored whole is its own chain base
            if cl.chainbase(rev) != rev:
                return False
        return True

    @staticmethod
    def fromconfig(repo):
        # new repositories always store plain changelog entries
        return True
372 373
373 374
@registerformatvariant
class compressionengine(formatvariant):
    name = b'compression'
    default = b'zlib'

    description = _(
        b'Compresion algorithm used to compress data. '
        b'Some engine are faster than other'
    )

    upgrademessage = _(
        b'revlog content will be recompressed with the new algorithm.'
    )

    @classmethod
    def fromrepo(cls, repo):
        # we allow multiple compression engine requirement to co-exist because
        # strickly speaking, revlog seems to support mixed compression style.
        #
        # The compression used for new entries will be "the last one"
        current = b'zlib'
        for req in repo.requirements:
            if req.startswith(b'revlog-compression-') or req.startswith(
                b'exp-compression-'
            ):
                # requirement looks like '<prefix>-<engine-name>'
                current = req.split(b'-', 2)[2]
        return current

    @classmethod
    def fromconfig(cls, repo):
        compengines = repo.ui.configlist(b'format', b'revlog-compression')
        # return the first valid value as the selection code would do
        for candidate in compengines:
            if candidate in util.compengines:
                return candidate

        # no valide compression found lets display it all for clarity
        return b','.join(compengines)
411 412
412 413
@registerformatvariant
class compressionlevel(formatvariant):
    name = b'compression-level'
    default = b'default'

    description = _(b'compression level')

    upgrademessage = _(b'revlog content will be recompressed')

    @staticmethod
    def _configured_level(repo, comp):
        """Return the configured level for engine ``comp`` as bytes.

        Returns ``b'default'`` when the engine has no explicit level
        configured (or is not a level-aware engine).
        """
        level = None
        if comp == b'zlib':
            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
        elif comp == b'zstd':
            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
        if level is None:
            return b'default'
        # `bytes(level)` would build a zero-filled byte sequence of length
        # `level` on Python 3; format the number as decimal text instead.
        return b'%d' % level

    @classmethod
    def fromrepo(cls, repo):
        """compression level currently used by the repository"""
        comp = compressionengine.fromrepo(repo)
        return cls._configured_level(repo, comp)

    @classmethod
    def fromconfig(cls, repo):
        """compression level a new repository would be created with"""
        comp = compressionengine.fromconfig(repo)
        return cls._configured_level(repo, comp)
445 446
446 447
def find_format_upgrades(repo):
    """returns a list of format upgrades which can be perform on the repo"""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
460 461
461 462
def find_format_downgrades(repo):
    """returns a list of format downgrades which will be performed on the repo
    because of disabled config option for them"""
    # a format variant present in the repo but absent from the config a new
    # repository would use is scheduled for removal
    return [
        fv
        for fv in allformatvariant
        if fv.fromrepo(repo) and not fv.fromconfig(repo)
    ]
475 476
476 477
# registry of every known optimization improvement, populated via
# ``register_optimization``
ALL_OPTIMISATIONS = []


def register_optimization(obj):
    # record ``obj`` in the registry; return it unchanged so this can be
    # used in expression position
    ALL_OPTIMISATIONS.append(obj)
    return obj
483 484
484 485
# Register the known delta-recomputation optimizations. They are proposed
# unconditionally; selection happens later in the upgrade process.
register_optimization(
    improvement(
        name=b're-delta-parent',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated to '
            b'choose an optimal base revision where this was not '
            b'already done; the size of the repository may shrink and '
            b'various operations may become faster; the first time '
            b'this optimization is performed could slow down upgrade '
            b'execution considerably; subsequent invocations should '
            b'not run noticeably slower'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose a new '
            b'base revision if needed'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-multibase',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated '
            b'against multiple base revision and the smallest '
            b'difference will be used; the size of the repository may '
            b'shrink significantly when there are many merges; this '
            b'optimization will slow down execution in proportion to '
            b'the number of merges in the repository and the amount '
            b'of files in the repository; this slow down should not '
            b'be significant unless there are tens of thousands of '
            b'files and thousands of merges'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose an '
            b'optimal delta by computing deltas against multiple '
            b'parents; may slow down execution time '
            b'significantly'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-all',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will always be '
            b'recalculated without reusing prior deltas; this will '
            b'likely make execution run several times slower; this '
            b'optimization is typically not needed'
        ),
        upgrademessage=_(
            b'deltas within internal storage will be fully '
            b'recomputed; this will likely drastically slow down '
            b'execution time'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-fulladd',
        type=OPTIMISATION,
        description=_(
            b'every revision will be re-added as if it was new '
            b'content. It will go through the full storage '
            b'mechanism giving extensions a chance to process it '
            b'(eg. lfs). This is similar to "re-delta-all" but even '
            b'slower since more logic is involved.'
        ),
        upgrademessage=_(
            b'each revision will be added as new content to the '
            b'internal storage; this will likely drastically slow '
            b'down execution time, but some extensions might need '
            b'it'
        ),
    )
)
566 567
567 568
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade"""
    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.
    return ALL_OPTIMISATIONS[:]
573 574
574 575
def determine_upgrade_actions(
    repo, format_upgrades, optimizations, sourcereqs, destreqs
):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``find_format_upgrades`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    newactions = []

    for upgrade in format_upgrades:
        req = upgrade._requirement
        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if req is not None and req not in destreqs:
            continue
        newactions.append(upgrade)

    # append optimizations in sorted order, skipping any already scheduled
    # (membership is checked against the growing list, matching the original
    # lazy-extend behaviour)
    for opt in sorted(optimizations):
        if opt not in newactions:
            newactions.append(opt)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
608 609
609 610
class UpgradeOperation(object):
    """represent the work to be done during an upgrade"""

    def __init__(
        self,
        ui,
        new_requirements,
        current_requirements,
        upgrade_actions,
        removed_actions,
        revlogs_to_process,
    ):
        self.ui = ui
        # requirements the repository will have after the operation
        self.new_requirements = new_requirements
        # requirements the repository currently has
        self.current_requirements = current_requirements
        # list of upgrade actions the operation will perform
        self.upgrade_actions = upgrade_actions
        # action names, for fast membership tests
        self._upgrade_actions_names = set([a.name for a in upgrade_actions])
        # improvements removed by the operation (downgrades)
        self.removed_actions = removed_actions
        # revlog categories the operation will rewrite
        self.revlogs_to_process = revlogs_to_process
        # requirements which will be added by the operation
        self._added_requirements = (
            self.new_requirements - self.current_requirements
        )
        # requirements which will be removed by the operation
        self._removed_requirements = (
            self.current_requirements - self.new_requirements
        )
        # requirements which will be preserved by the operation
        self._preserved_requirements = (
            self.current_requirements & self.new_requirements
        )
        # optimizations which are not used and it's recommended that they
        # should use them
        all_optimizations = findoptimizations(None)
        self.unused_optimizations = [
            i for i in all_optimizations if i not in self.upgrade_actions
        ]

        # delta reuse mode of this upgrade operation; derived once from the
        # selected re-delta-* optimization (default: reuse deltas always)
        self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
        if b're-delta-all' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
        elif b're-delta-parent' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-multibase' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-fulladd' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD

    def _write_labeled(self, l, label):
        """
        Utility function to aid writing of a list under one label
        """
        first = True
        for r in sorted(l):
            if not first:
                self.ui.write(b', ')
            self.ui.write(r, label=label)
            first = False

    def print_requirements(self):
        # report preserved/removed/added requirement sets, one labeled
        # comma-separated list per category
        self.ui.write(_(b'requirements\n'))
        self.ui.write(_(b'   preserved: '))
        self._write_labeled(
            self._preserved_requirements, "upgrade-repo.requirement.preserved"
        )
        self.ui.write((b'\n'))
        if self._removed_requirements:
            self.ui.write(_(b'   removed: '))
            self._write_labeled(
                self._removed_requirements, "upgrade-repo.requirement.removed"
            )
            self.ui.write((b'\n'))
        if self._added_requirements:
            self.ui.write(_(b'   added: '))
            self._write_labeled(
                self._added_requirements, "upgrade-repo.requirement.added"
            )
            self.ui.write((b'\n'))
        self.ui.write(b'\n')

    def print_optimisations(self):
        # list the optimization-type actions that will be performed
        optimisations = [
            a for a in self.upgrade_actions if a.type == OPTIMISATION
        ]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            self.ui.write(_(b'optimisations: '))
            self._write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            self.ui.write(b'\n\n')

    def print_upgrade_actions(self):
        # one "name + future-tense message" paragraph per action
        for a in self.upgrade_actions:
            self.ui.status(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))

    def print_affected_revlogs(self):
        # report which revlog categories the operation will rewrite
        if not self.revlogs_to_process:
            self.ui.write((b'no revlogs to process\n'))
        else:
            self.ui.write((b'processed revlogs:\n'))
            for r in sorted(self.revlogs_to_process):
                self.ui.write((b'  - %s\n' % r))
        self.ui.write((b'\n'))

    def print_unused_optimizations(self):
        # advertise optimizations the user did not select
        for i in self.unused_optimizations:
            self.ui.status(_(b'%s\n   %s\n\n') % (i.name, i.description))

    def has_upgrade_action(self, name):
        """ Check whether the upgrade operation will perform this action """
        return name in self._upgrade_actions_names

    def print_post_op_messages(self):
        """ print post upgrade operation warning messages """
        for a in self.upgrade_actions:
            if a.postupgrademessage is not None:
                self.ui.warn(b'%s\n' % a.postupgrademessage)
        for a in self.removed_actions:
            if a.postdowngrademessage is not None:
                self.ui.warn(b'%s\n' % a.postdowngrademessage)
723 735
724 736
725 737 ### Code checking if a repository can got through the upgrade process at all. #
726 738
727 739
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # both requirements were introduced in Mercurial 0.9.2
    required = {b'revlogv1', b'store'}
    return required
740 752
741 753
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # This was a precursor to generaldelta and was never enabled by
        # default. It should (hopefully) not exist in the wild.
        b'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        requirements.SHARED_REQUIREMENT,
    }
    return blockers
758 770
759 771
def check_source_requirements(repo):
    """Ensure that no existing requirements prevent the repository upgrade"""

    # every required requirement must be present...
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        msg = _(b'cannot upgrade repository; requirement missing: %s')
        raise error.Abort(msg % b', '.join(sorted(missingreqs)))

    # ...and no blocking requirement may be present
    blockingreqs = blocksourcerequirements(repo) & repo.requirements
    if blockingreqs:
        m = _(b'cannot upgrade repository; unsupported source requirement: %s')
        raise error.Abort(m % b', '.join(sorted(blockingreqs)))
776 788
777 789
778 790 ### Verify the validity of the planned requirement changes ####################
779 791
780 792
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # requirements tied to a usable compression engine may also be dropped
    for engine_name in compression.compengines:
        engine = compression.compengines[engine_name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % engine_name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
802 814
803 815
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        b'revlogv1',
        b'store',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # every usable compression engine is a supported destination requirement
    for engine_name in compression.compengines:
        engine = compression.compengines[engine_name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % engine_name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
831 843
832 844
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # requirements tied to a usable compression engine may also be added
    for engine_name in compression.compengines:
        engine = compression.compengines[engine_name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % engine_name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
860 872
861 873
def check_requirements_changes(repo, new_reqs):
    """Validate the requirement changes implied by ``new_reqs``.

    Aborts when the operation would remove an unsupported requirement, add
    a disallowed one, or end up with a destination requirement the upgrade
    code does not support.
    """
    old_reqs = repo.requirements

    # removals must all be explicitly supported
    bad_removals = old_reqs - new_reqs - supportremovedrequirements(repo)
    if bad_removals:
        msg = _(b'cannot upgrade repository; requirement would be removed: %s')
        raise error.Abort(msg % b', '.join(sorted(bad_removals)))

    # additions must all be on the whitelist
    bad_additions = new_reqs - old_reqs - allowednewrequirements(repo)
    if bad_additions:
        m = _(b'cannot upgrade repository; do not support adding requirement: ')
        raise error.Abort(m + b', '.join(sorted(bad_additions)))

    # the final requirement set must be fully supported
    unsupported_reqs = new_reqs - supporteddestrequirements(repo)
    if unsupported_reqs:
        msg = _(
            b'cannot upgrade repository; do not support destination '
            b'requirement: %s'
        )
        raise error.Abort(msg % b', '.join(sorted(unsupported_reqs)))
@@ -1,543 +1,532 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from ..i18n import _
13 13 from ..pycompat import getattr
14 14 from .. import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 manifest,
19 19 metadata,
20 20 pycompat,
21 21 requirements,
22 22 revlog,
23 23 scmutil,
24 24 util,
25 25 vfs as vfsmod,
26 26 )
27 27
28 28
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    # The changelog lives at a fixed path in the store.
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)
    # A root or tree manifest; the prefix (possibly empty) is the tree dir.
    if path.endswith(b'00manifest.i'):
        tree = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # Anything else is a filelog: strip the leading "data/" and the
    # trailing ".i" (reverse of "/".join(("data", path + ".i"))).
    return filelog.filelog(repo.svfs, path[5:-2])
42 42
43 43
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # Unwrap storage wrappers (e.g. filelog) down to the raw revlog object.
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    # Resolve absolute source/destination paths for the index (.i) and
    # data (.d) files through each repository's vfs.
    oldvfs = oldrl.opener
    newvfs = newrl.opener
    oldindex = oldvfs.join(oldrl.indexfile)
    newindex = newvfs.join(newrl.indexfile)
    olddata = oldvfs.join(oldrl.datafile)
    newdata = newvfs.join(newrl.datafile)

    with newvfs(newrl.indexfile, b'w'):
        pass # create all the directories

    util.copyfile(oldindex, newindex)
    # A separate .d file only exists for non-inline revlogs; copy it when
    # present.
    copydata = oldrl.opener.exists(oldrl.datafile)
    if copydata:
        util.copyfile(olddata, newdata)

    # Changelog and manifest are not tracked by the fncache; every other
    # store file must be registered so it survives future operations.
    if not (
        unencodedname.endswith(b'00changelog.i')
        or unencodedname.endswith(b'00manifest.i')
    ):
        destrepo.svfs.fncache.add(unencodedname)
        if copydata:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
77 77
78 78
# Symbolic names for the categories of revlogs an upgrade can be
# restricted to; used as members of the `revlogs` filter sets below.
UPGRADE_CHANGELOG = b"changelog"
UPGRADE_MANIFEST = b"manifest"
UPGRADE_FILELOGS = b"all-filelogs"

# Default filter: re-encode every category of revlog.
UPGRADE_ALL_REVLOGS = frozenset(
    [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
)
86 86
87 87
def getsidedatacompanion(srcrepo, dstrepo):
    """Return a sidedata companion callback for cloning, or None.

    The companion is chosen from the difference between the two repos'
    requirement sets:
    - side-data support removed: return a callback stripping the sidedata
      flag from every revision carrying it;
    - copies-in-sidedata added/removed: delegate to the metadata module's
      adder/remover helpers;
    - otherwise: None (no sidedata rewriting needed).
    """
    sidedatacompanion = None
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements
    if requirements.SIDEDATA_REQUIREMENT in removedreqs:

        def sidedatacompanion(rl, rev):
            # Returns (dropall, updates, new_sidedata, added_flags,
            # removed_flags); here we only ever drop existing sidedata.
            rl = getattr(rl, '_revlog', rl)
            if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
                return True, (), {}, 0, 0
            return False, (), {}, 0, 0

    elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
        sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
    elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
        sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
    return sidedatacompanion
105 105
106 106
def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning.

    In other words, are there any updates which need to be done on revlog
    or it can be blindly copied.

    The store entry is checked against the passed filter"""
    # Classify the store entry, then test membership in the filter set.
    if entry.endswith(b'00changelog.i'):
        category = UPGRADE_CHANGELOG
    elif entry.endswith(b'00manifest.i'):
        category = UPGRADE_MANIFEST
    else:
        category = UPGRADE_FILELOGS
    return category in revlogfilter
119 119
120 120
def _perform_clone(
    ui,
    dstrepo,
    tr,
    old_revlog,
    unencoded,
    deltareuse,
    forcedeltabothparents,
    revlogs,
    sidedatacompanion,
    oncopiedrevision,
):
    """Migrate one revlog into ``dstrepo`` and return the new revlog object.

    When ``unencoded`` matches the ``revlogs`` filter the revlog is cloned
    revision by revision (re-encoding deltas per ``deltareuse``); otherwise
    its files are blindly copied as-is.
    """
    newrl = None
    if matchrevlog(revlogs, unencoded):
        ui.note(
            _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
        )
        newrl = _revlogfrompath(dstrepo, unencoded)
        old_revlog.clone(
            tr,
            newrl,
            addrevisioncb=oncopiedrevision,
            deltareuse=deltareuse,
            forcedeltabothparents=forcedeltabothparents,
            sidedatacompanion=sidedatacompanion,
        )
    else:
        msg = _(b'blindly copying %s containing %i revisions\n')
        ui.note(msg % (unencoded, len(old_revlog)))
        _copyrevlog(tr, dstrepo, old_revlog, unencoded)

        # Re-open so the returned object reflects the copied files.
        newrl = _revlogfrompath(dstrepo, unencoded)
    return newrl
155 155
156 156
def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    deltareuse,
    forcedeltabothparents,
    revlogs=UPGRADE_ALL_REVLOGS,
):
    """Copy revlogs between 2 repos.

    Revlogs selected by ``revlogs`` are cloned revision by revision
    (re-encoding deltas according to ``deltareuse``); the rest are blindly
    copied. Progress and per-category size statistics are reported on
    ``ui``.

    Args:
        ui: ui object for status/progress output
        srcrepo: repository the revlogs are read from
        dstrepo: repository receiving the migrated revlogs
        tr: transaction on ``dstrepo`` covering all writes
        deltareuse: one of the ``revlog.revlog.DELTAREUSE*`` policies
        forcedeltabothparents: forwarded to ``revlog.clone``
        revlogs: subset of ``UPGRADE_ALL_REVLOGS`` to actually re-encode
    """
    # Aggregate counters across every revlog.
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    # Per-category counters: f* filelogs, m* manifests, c* changelog.
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())
    # mapping of data files which needs to be cloned
    # key is unencoded filename
    # value is revlog_object_from_srcrepo
    manifests = {}
    changelogs = {}
    filelogs = {}

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        # .d files belong to the .i revlog that owns them; skip them here.
        if unencoded.endswith(b'.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            changelogs[unencoded] = rl
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            manifests[unencoded] = rl
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            filelogs[unencoded] = rl
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # Bug fix: the exception was previously constructed but never
            # raised, silently ignoring unknown revlog types.
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Migrating filelogs
    ui.status(
        _(
            b'migrating %d filelogs containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            fcount,
            frevcount,
            util.bytecount(fsrcsize),
            util.bytecount(frawsize),
        )
    )
    progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
    for unencoded, oldrl in sorted(filelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            deltareuse,
            forcedeltabothparents,
            revlogs,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        fdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d filelog revisions across %d '
            b'filelogs; change in size: %s\n'
        )
        % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
    )

    # Migrating manifests
    ui.status(
        _(
            b'migrating %d manifests containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            mcount,
            mrevcount,
            util.bytecount(msrcsize),
            util.bytecount(mrawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'manifest revisions'), total=mrevcount
    )
    for unencoded, oldrl in sorted(manifests.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            deltareuse,
            forcedeltabothparents,
            revlogs,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        mdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d manifest revisions across %d '
            b'manifests; change in size: %s\n'
        )
        % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
    )

    # Migrating changelog
    ui.status(
        _(
            b'migrating changelog containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            crevcount,
            util.bytecount(csrcsize),
            util.bytecount(crawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'changelog revisions'), total=crevcount
    )
    for unencoded, oldrl in sorted(changelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            deltareuse,
            forcedeltabothparents,
            revlogs,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        cdstsize += info[b'storedsize'] or 0
    progress.complete()
    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    dstsize = fdstsize + mdstsize + cdstsize
    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )
387 387
388 388
389 389 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
390 390 """Determine whether to copy a store file during upgrade.
391 391
392 392 This function is called when migrating store files from ``srcrepo`` to
393 393 ``dstrepo`` as part of upgrading a repository.
394 394
395 395 Args:
396 396 srcrepo: repo we are copying from
397 397 dstrepo: repo we are copying to
398 398 requirements: set of requirements for ``dstrepo``
399 399 path: store file being examined
400 400 mode: the ``ST_MODE`` file type of ``path``
401 401 st: ``stat`` data structure for ``path``
402 402
403 403 Function should return ``True`` if the file is to be copied.
404 404 """
405 405 # Skip revlogs.
406 406 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
407 407 return False
408 408 # Skip transaction related files.
409 409 if path.startswith(b'undo'):
410 410 return False
411 411 # Only copy regular files.
412 412 if mode != stat.S_IFREG:
413 413 return False
414 414 # Skip other skipped files.
415 415 if path in (b'lock', b'fncache'):
416 416 return False
417 417
418 418 return True
419 419
420 420
def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.

    The default implementation intentionally does nothing; extensions wrap
    this function to migrate their own data.
    """
427 427
428 428
def upgrade(ui, srcrepo, dstrepo, upgrade_op):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Returns the path of the backup directory holding the pre-upgrade store
    and requires file.
    """
    # Both repos must already be write-locked by the caller.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

    with dstrepo.transaction(b'upgrade') as tr:
        # The delta re-use policy for the whole operation is pre-computed
        # on the UpgradeOperation object (delta_reuse_mode).
        _clonerevlogs(
            ui,
            srcrepo,
            dstrepo,
            tr,
            upgrade_op.delta_reuse_mode,
            upgrade_op.has_upgrade_action(b're-delta-multibase'),
            revlogs=upgrade_op.revlogs_to_process,
        )

        # Now copy other files in the store directory.
        # The sorted() makes execution deterministic.
        for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
            if not _filterstorefile(
                srcrepo, dstrepo, upgrade_op.new_requirements, p, kind, st
            ):
                continue

            srcrepo.ui.status(_(b'copying %s\n') % p)
            src = srcrepo.store.rawvfs.join(p)
            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

        # Give extensions a chance to migrate their own data.
        _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.status(_(b'data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.status(
        _(
            b'marking source repository as being upgraded; clients will be '
            b'unable to read from repository\n'
        )
    )
    scmutil.writereporequirements(
        srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
    )

    ui.status(_(b'starting in-place swap of repository data\n'))
    ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.status(_(b'replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join(b'store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.status(
        _(
            b'store replacement complete; repository was inconsistent for '
            b'%0.1fs\n'
        )
        % elapsed
    )

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.status(
        _(
            b'finalizing requirements file and making repository readable '
            b'again\n'
        )
    )
    scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink(b'store/lock')

    return backuppath
General Comments 0
You need to be logged in to leave comments. Login now