upgrade: close progress after each revlog...
Martin von Zweigbergk
r38417:f273b768 default
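This change closes the progress bar for the previous group of revlogs before the copy loop switches to the next group, rather than leaving the old topic open. The lines added to _copyrevlogs follow this pattern (excerpted and slightly simplified from the diff below; passing None as the position is what clears a progress topic):

    if progress:
        ui.progress(progress[0], None)
    progress[:] = [_('manifest revisions'), 0, mrevcount]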
@@ -1,868 +1,872
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 changelog,
15 15 error,
16 16 filelog,
17 17 hg,
18 18 localrepo,
19 19 manifest,
20 20 pycompat,
21 21 revlog,
22 22 scmutil,
23 23 util,
24 24 vfs as vfsmod,
25 25 )
26 26
27 27 def requiredsourcerequirements(repo):
28 28 """Obtain requirements required to be present to upgrade a repo.
29 29
30 30 An upgrade will not be allowed if the repository doesn't have the
31 31 requirements returned by this function.
32 32 """
33 33 return {
34 34 # Introduced in Mercurial 0.9.2.
35 35 'revlogv1',
36 36 # Introduced in Mercurial 0.9.2.
37 37 'store',
38 38 }
39 39
40 40 def blocksourcerequirements(repo):
41 41 """Obtain requirements that will prevent an upgrade from occurring.
42 42
43 43 An upgrade cannot be performed if the source repository contains a
44 44 requirement in the returned set.
45 45 """
46 46 return {
47 47 # The upgrade code does not yet support these experimental features.
48 48 # This is an artificial limitation.
49 49 'treemanifest',
50 50 # This was a precursor to generaldelta and was never enabled by default.
51 51 # It should (hopefully) not exist in the wild.
52 52 'parentdelta',
53 53 # Upgrade should operate on the actual store, not the shared link.
54 54 'shared',
55 55 }
56 56
57 57 def supportremovedrequirements(repo):
58 58 """Obtain requirements that can be removed during an upgrade.
59 59
60 60 If an upgrade were to create a repository that dropped a requirement,
61 61 the dropped requirement must appear in the returned set for the upgrade
62 62 to be allowed.
63 63 """
64 64 return set()
65 65
66 66 def supporteddestrequirements(repo):
67 67 """Obtain requirements that upgrade supports in the destination.
68 68
69 69 If the result of the upgrade would create requirements not in this set,
70 70 the upgrade is disallowed.
71 71
72 72 Extensions should monkeypatch this to add their custom requirements.
73 73 """
74 74 return {
75 75 'dotencode',
76 76 'fncache',
77 77 'generaldelta',
78 78 'revlogv1',
79 79 'store',
80 80 }
81 81
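# As the docstring above says, extensions may monkeypatch
# supporteddestrequirements() to advertise their own requirements. A minimal,
# hypothetical sketch ('myextrequirement' is an invented name) using the stock
# wrapfunction helper might look like:
#
#     from mercurial import extensions, upgrade
#
#     def _supporteddest(orig, repo):
#         return orig(repo) | {'myextrequirement'}
#
#     extensions.wrapfunction(upgrade, 'supporteddestrequirements',
#                             _supporteddest)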
82 82 def allowednewrequirements(repo):
83 83 """Obtain requirements that can be added to a repository during upgrade.
84 84
85 85 This is used to disallow proposed requirements from being added when
86 86 they weren't present before.
87 87
88 88 We use a list of allowed requirement additions instead of a list of known
89 89 bad additions because the whitelist approach is safer and will prevent
90 90 future, unknown requirements from accidentally being added.
91 91 """
92 92 return {
93 93 'dotencode',
94 94 'fncache',
95 95 'generaldelta',
96 96 }
97 97
98 98 def preservedrequirements(repo):
99 99 return set()
100 100
101 101 deficiency = 'deficiency'
102 102 optimisation = 'optimization'
103 103
104 104 class improvement(object):
105 105 """Represents an improvement that can be made as part of an upgrade.
106 106
107 107 The following attributes are defined on each instance:
108 108
109 109 name
110 110 Machine-readable string uniquely identifying this improvement. It
111 111 will be mapped to an action later in the upgrade process.
112 112
113 113 type
114 114 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
115 115 problem. An optimization is an action (sometimes optional) that
116 116 can be taken to further improve the state of the repository.
117 117
118 118 description
119 119 Message intended for humans explaining the improvement in more detail,
120 120 including the implications of it. For ``deficiency`` types, should be
121 121 worded in the present tense. For ``optimisation`` types, should be
122 122 worded in the future tense.
123 123
124 124 upgrademessage
125 125 Message intended for humans explaining what an upgrade addressing this
126 126 issue will do. Should be worded in the future tense.
127 127 """
128 128 def __init__(self, name, type, description, upgrademessage):
129 129 self.name = name
130 130 self.type = type
131 131 self.description = description
132 132 self.upgrademessage = upgrademessage
133 133
134 134 def __eq__(self, other):
135 135 if not isinstance(other, improvement):
136 136 # This is what Python tells us to do
137 137 return NotImplemented
138 138 return self.name == other.name
139 139
140 140 def __ne__(self, other):
141 141 return not self == other
142 142
143 143 def __hash__(self):
144 144 return hash(self.name)
145 145
146 146 allformatvariant = []
147 147
148 148 def registerformatvariant(cls):
149 149 allformatvariant.append(cls)
150 150 return cls
151 151
152 152 class formatvariant(improvement):
153 153 """an improvement subclass dedicated to repository format"""
154 154 type = deficiency
155 155 ### The following attributes should be defined for each class:
156 156
157 157 # machine-readable string uniquely identifying this improvement. it will be
158 158 # mapped to an action later in the upgrade process.
159 159 name = None
160 160
161 161 # message intended for humans explaining the improvement in more detail,
162 162 # including the implications of it. For ``deficiency`` types, should be worded
163 163 # in the present tense.
164 164 description = None
165 165
166 166 # message intended for humans explaining what an upgrade addressing this
167 167 # issue will do. should be worded in the future tense.
168 168 upgrademessage = None
169 169
170 170 # value of current Mercurial default for new repository
171 171 default = None
172 172
173 173 def __init__(self):
174 174 raise NotImplementedError()
175 175
176 176 @staticmethod
177 177 def fromrepo(repo):
178 178 """current value of the variant in the repository"""
179 179 raise NotImplementedError()
180 180
181 181 @staticmethod
182 182 def fromconfig(repo):
183 183 """current value of the variant in the configuration"""
184 184 raise NotImplementedError()
185 185
186 186 class requirementformatvariant(formatvariant):
187 187 """formatvariant based on a 'requirement' name.
188 188
189 189 Many format variants are controlled by a 'requirement'. We define a small
190 190 subclass to factor out the common code.
191 191 """
192 192
193 193 # the requirement that controls this format variant
194 194 _requirement = None
195 195
196 196 @staticmethod
197 197 def _newreporequirements(repo):
198 198 return localrepo.newreporequirements(repo)
199 199
200 200 @classmethod
201 201 def fromrepo(cls, repo):
202 202 assert cls._requirement is not None
203 203 return cls._requirement in repo.requirements
204 204
205 205 @classmethod
206 206 def fromconfig(cls, repo):
207 207 assert cls._requirement is not None
208 208 return cls._requirement in cls._newreporequirements(repo)
209 209
210 210 @registerformatvariant
211 211 class fncache(requirementformatvariant):
212 212 name = 'fncache'
213 213
214 214 _requirement = 'fncache'
215 215
216 216 default = True
217 217
218 218 description = _('long and reserved filenames may not work correctly; '
219 219 'repository performance is sub-optimal')
220 220
221 221 upgrademessage = _('repository will be more resilient to storing '
222 222 'certain paths and performance of certain '
223 223 'operations should be improved')
224 224
225 225 @registerformatvariant
226 226 class dotencode(requirementformatvariant):
227 227 name = 'dotencode'
228 228
229 229 _requirement = 'dotencode'
230 230
231 231 default = True
232 232
233 233 description = _('storage of filenames beginning with a period or '
234 234 'space may not work correctly')
235 235
236 236 upgrademessage = _('repository will be better able to store files '
237 237 'beginning with a space or period')
238 238
239 239 @registerformatvariant
240 240 class generaldelta(requirementformatvariant):
241 241 name = 'generaldelta'
242 242
243 243 _requirement = 'generaldelta'
244 244
245 245 default = True
246 246
247 247 description = _('deltas within internal storage are unable to '
248 248 'choose optimal revisions; repository is larger and '
249 249 'slower than it could be; interaction with other '
250 250 'repositories may require extra network and CPU '
251 251 'resources, making "hg push" and "hg pull" slower')
252 252
253 253 upgrademessage = _('repository storage will be able to create '
254 254 'optimal deltas; new repository data will be '
255 255 'smaller and read times should decrease; '
256 256 'interacting with other repositories using this '
257 257 'storage model should require less network and '
258 258 'CPU resources, making "hg push" and "hg pull" '
259 259 'faster')
260 260
261 261 @registerformatvariant
262 262 class removecldeltachain(formatvariant):
263 263 name = 'plain-cl-delta'
264 264
265 265 default = True
266 266
267 267 description = _('changelog storage is using deltas instead of '
268 268 'raw entries; changelog reading and any '
269 269 'operation relying on changelog data are slower '
270 270 'than they could be')
271 271
272 272 upgrademessage = _('changelog storage will be reformatted to '
273 273 'store raw entries; changelog reading will be '
274 274 'faster; changelog size may be reduced')
275 275
276 276 @staticmethod
277 277 def fromrepo(repo):
278 278 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
279 279 # changelogs with deltas.
280 280 cl = repo.changelog
281 281 chainbase = cl.chainbase
282 282 return all(rev == chainbase(rev) for rev in cl)
283 283
284 284 @staticmethod
285 285 def fromconfig(repo):
286 286 return True
287 287
288 288 @registerformatvariant
289 289 class compressionengine(formatvariant):
290 290 name = 'compression'
291 291 default = 'zlib'
292 292
293 293 description = _('Compression algorithm used to compress data. '
294 294 'Some engines are faster than others')
295 295
296 296 upgrademessage = _('revlog content will be recompressed with the new '
297 297 'algorithm.')
298 298
299 299 @classmethod
300 300 def fromrepo(cls, repo):
301 301 for req in repo.requirements:
302 302 if req.startswith('exp-compression-'):
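# e.g. the requirement 'exp-compression-zstd' yields 'zstd' here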
303 303 return req.split('-', 2)[2]
304 304 return 'zlib'
305 305
306 306 @classmethod
307 307 def fromconfig(cls, repo):
308 308 return repo.ui.config('experimental', 'format.compression')
309 309
310 310 def finddeficiencies(repo):
311 311 """returns a list of deficiencies that the repo suffer from"""
312 312 deficiencies = []
313 313
314 314 # We could detect lack of revlogv1 and store here, but they were added
315 315 # in 0.9.2 and we don't support upgrading repos without these
316 316 # requirements, so let's not bother.
317 317
318 318 for fv in allformatvariant:
319 319 if not fv.fromrepo(repo):
320 320 deficiencies.append(fv)
321 321
322 322 return deficiencies
323 323
324 324 def findoptimizations(repo):
325 325 """Determine optimisation that could be used during upgrade"""
326 326 # These are unconditionally added. There is logic later that figures out
327 327 # which ones to apply.
328 328 optimizations = []
329 329
330 330 optimizations.append(improvement(
331 331 name='redeltaparent',
332 332 type=optimisation,
333 333 description=_('deltas within internal storage will be recalculated to '
334 334 'choose an optimal base revision where this was not '
335 335 'already done; the size of the repository may shrink and '
336 336 'various operations may become faster; the first time '
337 337 'this optimization is performed could slow down upgrade '
338 338 'execution considerably; subsequent invocations should '
339 339 'not run noticeably slower'),
340 340 upgrademessage=_('deltas within internal storage will choose a new '
341 341 'base revision if needed')))
342 342
343 343 optimizations.append(improvement(
344 344 name='redeltamultibase',
345 345 type=optimisation,
346 346 description=_('deltas within internal storage will be recalculated '
347 347 'against multiple base revisions and the smallest '
348 348 'difference will be used; the size of the repository may '
349 349 'shrink significantly when there are many merges; this '
350 350 'optimization will slow down execution in proportion to '
351 351 'the number of merges in the repository and the number '
352 352 'of files in the repository; this slowdown should not '
353 353 'be significant unless there are tens of thousands of '
354 354 'files and thousands of merges'),
355 355 upgrademessage=_('deltas within internal storage will choose an '
356 356 'optimal delta by computing deltas against multiple '
357 357 'parents; may slow down execution time '
358 358 'significantly')))
359 359
360 360 optimizations.append(improvement(
361 361 name='redeltaall',
362 362 type=optimisation,
363 363 description=_('deltas within internal storage will always be '
364 364 'recalculated without reusing prior deltas; this will '
365 365 'likely make execution run several times slower; this '
366 366 'optimization is typically not needed'),
367 367 upgrademessage=_('deltas within internal storage will be fully '
368 368 'recomputed; this will likely drastically slow down '
369 369 'execution time')))
370 370
371 371 optimizations.append(improvement(
372 372 name='redeltafulladd',
373 373 type=optimisation,
374 374 description=_('every revision will be re-added as if it was new '
375 375 'content. It will go through the full storage '
376 376 'mechanism giving extensions a chance to process it '
377 377 '(e.g. lfs). This is similar to "redeltaall" but even '
378 378 'slower since more logic is involved.'),
379 379 upgrademessage=_('each revision will be added as new content to the '
380 380 'internal storage; this will likely drastically slow '
381 381 'down execution time, but some extensions might need '
382 382 'it')))
383 383
384 384 return optimizations
385 385
386 386 def determineactions(repo, deficiencies, sourcereqs, destreqs):
387 387 """Determine upgrade actions that will be performed.
388 388
389 389 Given a list of improvements as returned by ``finddeficiencies`` and
390 390 ``findoptimizations``, determine the list of upgrade actions that
391 391 will be performed.
392 392
393 393 The role of this function is to filter improvements if needed, apply
394 394 recommended optimizations from the improvements list that make sense,
395 395 etc.
396 396
397 397 Returns a list of action names.
398 398 """
399 399 newactions = []
400 400
401 401 knownreqs = supporteddestrequirements(repo)
402 402
403 403 for d in deficiencies:
404 404 name = d.name
405 405
406 406 # If the action is a requirement that doesn't show up in the
407 407 # destination requirements, prune the action.
408 408 if name in knownreqs and name not in destreqs:
409 409 continue
410 410
411 411 newactions.append(d)
412 412
413 413 # FUTURE consider adding some optimizations here for certain transitions.
414 414 # e.g. adding generaldelta could schedule parent redeltas.
415 415
416 416 return newactions
417 417
418 418 def _revlogfrompath(repo, path):
419 419 """Obtain a revlog from a repo path.
420 420
421 421 An instance of the appropriate class is returned.
422 422 """
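# For example: '00changelog.i' maps to the changelog, '00manifest.i' to the
# root manifest revlog, and a path such as 'data/foo/bar.txt.i' to the filelog
# tracking 'foo/bar.txt' (the slice below strips the 'data/' prefix and the
# '.i' suffix).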
423 423 if path == '00changelog.i':
424 424 return changelog.changelog(repo.svfs)
425 425 elif path.endswith('00manifest.i'):
426 426 mandir = path[:-len('00manifest.i')]
427 427 return manifest.manifestrevlog(repo.svfs, dir=mandir)
428 428 else:
429 429 # reverse of "/".join(("data", path + ".i")): strip the leading "data/" and the trailing ".i"
430 430 return filelog.filelog(repo.svfs, path[5:-2])
431 431
432 432 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
433 433 """Copy revlogs between 2 repos."""
434 434 revcount = 0
435 435 srcsize = 0
436 436 srcrawsize = 0
437 437 dstsize = 0
438 438 fcount = 0
439 439 frevcount = 0
440 440 fsrcsize = 0
441 441 frawsize = 0
442 442 fdstsize = 0
443 443 mcount = 0
444 444 mrevcount = 0
445 445 msrcsize = 0
446 446 mrawsize = 0
447 447 mdstsize = 0
448 448 crevcount = 0
449 449 csrcsize = 0
450 450 crawsize = 0
451 451 cdstsize = 0
452 452
453 453 # Perform a pass to collect metadata. This validates we can open all
454 454 # source files and allows a unified progress bar to be displayed.
455 455 for unencoded, encoded, size in srcrepo.store.walk():
456 456 if unencoded.endswith('.d'):
457 457 continue
458 458
459 459 rl = _revlogfrompath(srcrepo, unencoded)
460 460 revcount += len(rl)
461 461
462 462 datasize = 0
463 463 rawsize = 0
464 464 idx = rl.index
465 465 for rev in rl:
466 466 e = idx[rev]
467 467 datasize += e[1]
468 468 rawsize += e[2]
469 469
470 470 srcsize += datasize
471 471 srcrawsize += rawsize
472 472
473 473 # This is for the separate progress bars.
474 474 if isinstance(rl, changelog.changelog):
475 475 crevcount += len(rl)
476 476 csrcsize += datasize
477 477 crawsize += rawsize
478 478 elif isinstance(rl, manifest.manifestrevlog):
479 479 mcount += 1
480 480 mrevcount += len(rl)
481 481 msrcsize += datasize
482 482 mrawsize += rawsize
483 483 elif isinstance(rl, filelog.filelog):
484 484 fcount += 1
485 485 frevcount += len(rl)
486 486 fsrcsize += datasize
487 487 frawsize += rawsize
488 488 else:
489 489 raise error.ProgrammingError('unknown revlog type')
490 490
491 491 if not revcount:
492 492 return
493 493
494 494 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
495 495 '%d in changelog)\n') %
496 496 (revcount, frevcount, mrevcount, crevcount))
497 497 ui.write(_('migrating %s in store; %s tracked data\n') % (
498 498 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
499 499
500 500 # Used to keep track of progress.
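# Layout is [topic, completed revisions, total revisions]: oncopiedrevision()
# below bumps the completed count, and the list is reset each time the copy
# loop moves on to the next group of revlogs.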
501 501 progress = []
502 502 def oncopiedrevision(rl, rev, node):
503 503 progress[1] += 1
504 504 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
505 505
506 506 # Do the actual copying.
507 507 # FUTURE this operation can be farmed off to worker processes.
508 508 seen = set()
509 509 for unencoded, encoded, size in srcrepo.store.walk():
510 510 if unencoded.endswith('.d'):
511 511 continue
512 512
513 513 oldrl = _revlogfrompath(srcrepo, unencoded)
514 514 newrl = _revlogfrompath(dstrepo, unencoded)
515 515
516 516 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
517 517 ui.write(_('finished migrating %d manifest revisions across %d '
518 518 'manifests; change in size: %s\n') %
519 519 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
520 520
521 521 ui.write(_('migrating changelog containing %d revisions '
522 522 '(%s in store; %s tracked data)\n') %
523 523 (crevcount, util.bytecount(csrcsize),
524 524 util.bytecount(crawsize)))
525 525 seen.add('c')
526 526 progress[:] = [_('changelog revisions'), 0, crevcount]
527 527 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
528 528 ui.write(_('finished migrating %d filelog revisions across %d '
529 529 'filelogs; change in size: %s\n') %
530 530 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
531 531
532 532 ui.write(_('migrating %d manifests containing %d revisions '
533 533 '(%s in store; %s tracked data)\n') %
534 534 (mcount, mrevcount, util.bytecount(msrcsize),
535 535 util.bytecount(mrawsize)))
536 536 seen.add('m')
537 if progress:
538 ui.progress(progress[0], None)
537 539 progress[:] = [_('manifest revisions'), 0, mrevcount]
538 540 elif 'f' not in seen:
539 541 ui.write(_('migrating %d filelogs containing %d revisions '
540 542 '(%s in store; %s tracked data)\n') %
541 543 (fcount, frevcount, util.bytecount(fsrcsize),
542 544 util.bytecount(frawsize)))
543 545 seen.add('f')
546 if progress:
547 ui.progress(progress[0], None)
544 548 progress[:] = [_('file revisions'), 0, frevcount]
545 549
546 550 ui.progress(progress[0], progress[1], total=progress[2])
547 551
548 552 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
549 553 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
550 554 deltareuse=deltareuse,
551 555 aggressivemergedeltas=aggressivemergedeltas)
552 556
553 557 datasize = 0
554 558 idx = newrl.index
555 559 for rev in newrl:
556 560 datasize += idx[rev][1]
557 561
558 562 dstsize += datasize
559 563
560 564 if isinstance(newrl, changelog.changelog):
561 565 cdstsize += datasize
562 566 elif isinstance(newrl, manifest.manifestrevlog):
563 567 mdstsize += datasize
564 568 else:
565 569 fdstsize += datasize
566 570
567 571 ui.progress(progress[0], None)
568 572
569 573 ui.write(_('finished migrating %d changelog revisions; change in size: '
570 574 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
571 575
572 576 ui.write(_('finished migrating %d total revisions; total change in store '
573 577 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
574 578
575 579 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
576 580 """Determine whether to copy a store file during upgrade.
577 581
578 582 This function is called when migrating store files from ``srcrepo`` to
579 583 ``dstrepo`` as part of upgrading a repository.
580 584
581 585 Args:
582 586 srcrepo: repo we are copying from
583 587 dstrepo: repo we are copying to
584 588 requirements: set of requirements for ``dstrepo``
585 589 path: store file being examined
586 590 mode: the ``ST_MODE`` file type of ``path``
587 591 st: ``stat`` data structure for ``path``
588 592
589 593 Function should return ``True`` if the file is to be copied.
590 594 """
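# For example, a plain store file such as 'phaseroots' passes every check
# below and is copied, while revlogs ('.i'/'.d'), 'undo*' files, 'lock' and
# 'fncache' are filtered out.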
591 595 # Skip revlogs.
592 596 if path.endswith(('.i', '.d')):
593 597 return False
594 598 # Skip transaction related files.
595 599 if path.startswith('undo'):
596 600 return False
597 601 # Only copy regular files.
598 602 if mode != stat.S_IFREG:
599 603 return False
600 604 # Skip other skipped files.
601 605 if path in ('lock', 'fncache'):
602 606 return False
603 607
604 608 return True
605 609
606 610 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
607 611 """Hook point for extensions to perform additional actions during upgrade.
608 612
609 613 This function is called after revlogs and store files have been copied but
610 614 before the new store is swapped into the original location.
611 615 """
612 616
613 617 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
614 618 """Do the low-level work of upgrading a repository.
615 619
616 620 The upgrade is effectively performed as a copy between a source
617 621 repository and a temporary destination repository.
618 622
619 623 The source repository is unmodified for as long as possible so the
620 624 upgrade can abort at any time without causing loss of service for
621 625 readers and without corrupting the source repository.
622 626 """
623 627 assert srcrepo.currentwlock()
624 628 assert dstrepo.currentwlock()
625 629
626 630 ui.write(_('(it is safe to interrupt this process any time before '
627 631 'data migration completes)\n'))
628 632
629 633 if 'redeltaall' in actions:
630 634 deltareuse = revlog.revlog.DELTAREUSENEVER
631 635 elif 'redeltaparent' in actions:
632 636 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
633 637 elif 'redeltamultibase' in actions:
634 638 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
635 639 elif 'redeltafulladd' in actions:
636 640 deltareuse = revlog.revlog.DELTAREUSEFULLADD
637 641 else:
638 642 deltareuse = revlog.revlog.DELTAREUSEALWAYS
639 643
640 644 with dstrepo.transaction('upgrade') as tr:
641 645 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
642 646 'redeltamultibase' in actions)
643 647
644 648 # Now copy other files in the store directory.
645 649 # The sorted() makes execution deterministic.
646 650 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
647 651 if not _filterstorefile(srcrepo, dstrepo, requirements,
648 652 p, kind, st):
649 653 continue
650 654
651 655 srcrepo.ui.write(_('copying %s\n') % p)
652 656 src = srcrepo.store.rawvfs.join(p)
653 657 dst = dstrepo.store.rawvfs.join(p)
654 658 util.copyfile(src, dst, copystat=True)
655 659
656 660 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
657 661
658 662 ui.write(_('data fully migrated to temporary repository\n'))
659 663
660 664 backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
661 665 backupvfs = vfsmod.vfs(backuppath)
662 666
663 667 # Make a backup of requires file first, as it is the first to be modified.
664 668 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
665 669
666 670 # We install an arbitrary requirement that clients must not support
667 671 # as a mechanism to lock out new clients during the data swap. This is
668 672 # better than allowing a client to continue while the repository is in
669 673 # an inconsistent state.
670 674 ui.write(_('marking source repository as being upgraded; clients will be '
671 675 'unable to read from repository\n'))
672 676 scmutil.writerequires(srcrepo.vfs,
673 677 srcrepo.requirements | {'upgradeinprogress'})
674 678
675 679 ui.write(_('starting in-place swap of repository data\n'))
676 680 ui.write(_('replaced files will be backed up at %s\n') %
677 681 backuppath)
678 682
679 683 # Now swap in the new store directory. Doing it as a rename should make
680 684 # the operation nearly instantaneous and atomic (at least in well-behaved
681 685 # environments).
682 686 ui.write(_('replacing store...\n'))
683 687 tstart = util.timer()
684 688 util.rename(srcrepo.spath, backupvfs.join('store'))
685 689 util.rename(dstrepo.spath, srcrepo.spath)
686 690 elapsed = util.timer() - tstart
687 691 ui.write(_('store replacement complete; repository was inconsistent for '
688 692 '%0.1fs\n') % elapsed)
689 693
690 694 # We first write the requirements file. Any new requirements will lock
691 695 # out legacy clients.
692 696 ui.write(_('finalizing requirements file and making repository readable '
693 697 'again\n'))
694 698 scmutil.writerequires(srcrepo.vfs, requirements)
695 699
696 700 # The lock file from the old store won't be removed because nothing has a
697 701 # reference to its new location. So clean it up manually. Alternatively, we
698 702 # could update srcrepo.svfs and other variables to point to the new
699 703 # location. This is simpler.
700 704 backupvfs.unlink('store/lock')
701 705
702 706 return backuppath
703 707
704 708 def upgraderepo(ui, repo, run=False, optimize=None):
705 709 """Upgrade a repository in place."""
706 710 optimize = set(optimize or [])
707 711 repo = repo.unfiltered()
708 712
709 713 # Ensure the repository can be upgraded.
710 714 missingreqs = requiredsourcerequirements(repo) - repo.requirements
711 715 if missingreqs:
712 716 raise error.Abort(_('cannot upgrade repository; requirement '
713 717 'missing: %s') % _(', ').join(sorted(missingreqs)))
714 718
715 719 blockedreqs = blocksourcerequirements(repo) & repo.requirements
716 720 if blockedreqs:
717 721 raise error.Abort(_('cannot upgrade repository; unsupported source '
718 722 'requirement: %s') %
719 723 _(', ').join(sorted(blockedreqs)))
720 724
721 725 # FUTURE there is potentially a need to control the wanted requirements via
722 726 # command arguments or via an extension hook point.
723 727 newreqs = localrepo.newreporequirements(repo)
724 728 newreqs.update(preservedrequirements(repo))
725 729
726 730 noremovereqs = (repo.requirements - newreqs -
727 731 supportremovedrequirements(repo))
728 732 if noremovereqs:
729 733 raise error.Abort(_('cannot upgrade repository; requirement would be '
730 734 'removed: %s') % _(', ').join(sorted(noremovereqs)))
731 735
732 736 noaddreqs = (newreqs - repo.requirements -
733 737 allowednewrequirements(repo))
734 738 if noaddreqs:
735 739 raise error.Abort(_('cannot upgrade repository; do not support adding '
736 740 'requirement: %s') %
737 741 _(', ').join(sorted(noaddreqs)))
738 742
739 743 unsupportedreqs = newreqs - supporteddestrequirements(repo)
740 744 if unsupportedreqs:
741 745 raise error.Abort(_('cannot upgrade repository; do not support '
742 746 'destination requirement: %s') %
743 747 _(', ').join(sorted(unsupportedreqs)))
744 748
745 749 # Find and validate all improvements that can be made.
746 750 alloptimizations = findoptimizations(repo)
747 751
748 752 # Apply and validate arguments.
749 753 optimizations = []
750 754 for o in alloptimizations:
751 755 if o.name in optimize:
752 756 optimizations.append(o)
753 757 optimize.discard(o.name)
754 758
755 759 if optimize: # anything left is unknown
756 760 raise error.Abort(_('unknown optimization action requested: %s') %
757 761 ', '.join(sorted(optimize)),
758 762 hint=_('run without arguments to see valid '
759 763 'optimizations'))
760 764
761 765 deficiencies = finddeficiencies(repo)
762 766 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
763 767 actions.extend(o for o in sorted(optimizations)
764 768 # determineactions could have added optimisations
765 769 if o not in actions)
766 770
767 771 def printrequirements():
768 772 ui.write(_('requirements\n'))
769 773 ui.write(_(' preserved: %s\n') %
770 774 _(', ').join(sorted(newreqs & repo.requirements)))
771 775
772 776 if repo.requirements - newreqs:
773 777 ui.write(_(' removed: %s\n') %
774 778 _(', ').join(sorted(repo.requirements - newreqs)))
775 779
776 780 if newreqs - repo.requirements:
777 781 ui.write(_(' added: %s\n') %
778 782 _(', ').join(sorted(newreqs - repo.requirements)))
779 783
780 784 ui.write('\n')
781 785
782 786 def printupgradeactions():
783 787 for a in actions:
784 788 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
785 789
786 790 if not run:
787 791 fromconfig = []
788 792 onlydefault = []
789 793
790 794 for d in deficiencies:
791 795 if d.fromconfig(repo):
792 796 fromconfig.append(d)
793 797 elif d.default:
794 798 onlydefault.append(d)
795 799
796 800 if fromconfig or onlydefault:
797 801
798 802 if fromconfig:
799 803 ui.write(_('repository lacks features recommended by '
800 804 'current config options:\n\n'))
801 805 for i in fromconfig:
802 806 ui.write('%s\n %s\n\n' % (i.name, i.description))
803 807
804 808 if onlydefault:
805 809 ui.write(_('repository lacks features used by the default '
806 810 'config options:\n\n'))
807 811 for i in onlydefault:
808 812 ui.write('%s\n %s\n\n' % (i.name, i.description))
809 813
810 814 ui.write('\n')
811 815 else:
812 816 ui.write(_('(no feature deficiencies found in existing '
813 817 'repository)\n'))
814 818
815 819 ui.write(_('performing an upgrade with "--run" will make the following '
816 820 'changes:\n\n'))
817 821
818 822 printrequirements()
819 823 printupgradeactions()
820 824
821 825 unusedoptimize = [i for i in alloptimizations if i not in actions]
822 826
823 827 if unusedoptimize:
824 828 ui.write(_('additional optimizations are available by specifying '
825 829 '"--optimize <name>":\n\n'))
826 830 for i in unusedoptimize:
827 831 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
828 832 return
829 833
830 834 # Else we're in the run=true case.
831 835 ui.write(_('upgrade will perform the following actions:\n\n'))
832 836 printrequirements()
833 837 printupgradeactions()
834 838
835 839 upgradeactions = [a.name for a in actions]
836 840
837 841 ui.write(_('beginning upgrade...\n'))
838 842 with repo.wlock(), repo.lock():
839 843 ui.write(_('repository locked and read-only\n'))
840 844 # Our strategy for upgrading the repository is to create a new,
841 845 # temporary repository, write data to it, then do a swap of the
842 846 # data. There are less heavyweight ways to do this, but it is easier
843 847 # to create a new repo object than to instantiate all the components
844 848 # (like the store) separately.
845 849 tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
846 850 backuppath = None
847 851 try:
848 852 ui.write(_('creating temporary repository to stage migrated '
849 853 'data: %s\n') % tmppath)
850 854
851 855 # clone ui without using ui.copy because repo.ui is protected
852 856 repoui = repo.ui.__class__(repo.ui)
853 857 dstrepo = hg.repository(repoui, path=tmppath, create=True)
854 858
855 859 with dstrepo.wlock(), dstrepo.lock():
856 860 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
857 861 upgradeactions)
858 862
859 863 finally:
860 864 ui.write(_('removing temporary repository %s\n') % tmppath)
861 865 repo.vfs.rmtree(tmppath, forcibly=True)
862 866
863 867 if backuppath:
864 868 ui.warn(_('copy of old repository backed up at %s\n') %
865 869 backuppath)
866 870 ui.warn(_('the old repository will not be deleted; remove '
867 871 'it to free up disk space once the upgraded '
868 872 'repository is verified\n'))
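For context, upgraderepo() is what the `hg debugupgraderepo` command invokes; assuming the optimization names registered above, a run that also re-deltas against parent revisions would look like:

    hg debugupgraderepo --optimize redeltaparent --run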