upgrade: sniff for filelog type...
Gregory Szorc
r37462:c8666a9e default
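This revision changes how _copyrevlogs recognizes filelogs: instead of treating any remaining revlog.revlog instance as a filelog, it sniffs for filelog.filelog explicitly and reports a ProgrammingError for any revlog class it does not recognize. The snippet below is a simplified, hypothetical sketch of that dispatch pattern, not Mercurial's real code; the classes are stand-ins for the real changelog, manifestrevlog, and filelog types.

class revlog(object):
    """Stand-in for mercurial.revlog.revlog (base storage class)."""

class changelog(revlog):
    """Stand-in for mercurial.changelog.changelog."""

class manifestrevlog(revlog):
    """Stand-in for mercurial.manifest.manifestrevlog."""

class filelog(revlog):
    """Stand-in for mercurial.filelog.filelog."""

def classify(rl):
    # Check the most specific classes first, then filelog, and fail loudly
    # on anything unknown instead of silently counting it as a filelog.
    if isinstance(rl, changelog):
        return 'changelog'
    elif isinstance(rl, manifestrevlog):
        return 'manifest'
    elif isinstance(rl, filelog):
        return 'filelog'
    else:
        raise TypeError('unknown revlog type')

if __name__ == '__main__':
    for rl in (changelog(), manifestrevlog(), filelog()):
        print('%s -> %s' % (type(rl).__name__, classify(rl)))

In the real _copyrevlogs below, the same isinstance checks additionally accumulate per-type revision counts and sizes for progress reporting.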
@@ -1,866 +1,868
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11 import tempfile
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 hg,
19 19 localrepo,
20 20 manifest,
21 21 revlog,
22 22 scmutil,
23 23 util,
24 24 vfs as vfsmod,
25 25 )
26 26
27 27 def requiredsourcerequirements(repo):
28 28 """Obtain requirements required to be present to upgrade a repo.
29 29
30 30 An upgrade will not be allowed if the repository doesn't have the
31 31 requirements returned by this function.
32 32 """
33 33 return {
34 34 # Introduced in Mercurial 0.9.2.
35 35 'revlogv1',
36 36 # Introduced in Mercurial 0.9.2.
37 37 'store',
38 38 }
39 39
40 40 def blocksourcerequirements(repo):
41 41 """Obtain requirements that will prevent an upgrade from occurring.
42 42
43 43 An upgrade cannot be performed if the source repository contains a
44 44 requirement in the returned set.
45 45 """
46 46 return {
47 47 # The upgrade code does not yet support these experimental features.
48 48 # This is an artificial limitation.
49 49 'treemanifest',
50 50 # This was a precursor to generaldelta and was never enabled by default.
51 51 # It should (hopefully) not exist in the wild.
52 52 'parentdelta',
53 53 # Upgrade should operate on the actual store, not the shared link.
54 54 'shared',
55 55 }
56 56
57 57 def supportremovedrequirements(repo):
58 58 """Obtain requirements that can be removed during an upgrade.
59 59
60 60 If an upgrade were to create a repository that dropped a requirement,
61 61 the dropped requirement must appear in the returned set for the upgrade
62 62 to be allowed.
63 63 """
64 64 return set()
65 65
66 66 def supporteddestrequirements(repo):
67 67 """Obtain requirements that upgrade supports in the destination.
68 68
69 69 If the result of the upgrade would create requirements not in this set,
70 70 the upgrade is disallowed.
71 71
72 72 Extensions should monkeypatch this to add their custom requirements.
73 73 """
74 74 return {
75 75 'dotencode',
76 76 'fncache',
77 77 'generaldelta',
78 78 'revlogv1',
79 79 'store',
80 80 }
81 81
82 82 def allowednewrequirements(repo):
83 83 """Obtain requirements that can be added to a repository during upgrade.
84 84
85 85 This is used to disallow proposed requirements from being added when
86 86 they weren't present before.
87 87
88 88 We use a list of allowed requirement additions instead of a list of known
89 89 bad additions because the whitelist approach is safer and will prevent
90 90 future, unknown requirements from accidentally being added.
91 91 """
92 92 return {
93 93 'dotencode',
94 94 'fncache',
95 95 'generaldelta',
96 96 }
97 97
98 98 def preservedrequirements(repo):
99 99 return set()
100 100
101 101 deficiency = 'deficiency'
102 102 optimisation = 'optimization'
103 103
104 104 class improvement(object):
105 105 """Represents an improvement that can be made as part of an upgrade.
106 106
107 107 The following attributes are defined on each instance:
108 108
109 109 name
110 110 Machine-readable string uniquely identifying this improvement. It
111 111 will be mapped to an action later in the upgrade process.
112 112
113 113 type
114 114 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
115 115 problem. An optimization is an action (sometimes optional) that
116 116 can be taken to further improve the state of the repository.
117 117
118 118 description
119 119 Message intended for humans explaining the improvement in more detail,
120 120 including the implications of it. For ``deficiency`` types, it should be
121 121 worded in the present tense. For ``optimisation`` types, it should be
122 122 worded in the future tense.
123 123
124 124 upgrademessage
125 125 Message intended for humans explaining what an upgrade addressing this
126 126 issue will do. Should be worded in the future tense.
127 127 """
128 128 def __init__(self, name, type, description, upgrademessage):
129 129 self.name = name
130 130 self.type = type
131 131 self.description = description
132 132 self.upgrademessage = upgrademessage
133 133
134 134 def __eq__(self, other):
135 135 if not isinstance(other, improvement):
136 136 # This is what Python tells us to do
137 137 return NotImplemented
138 138 return self.name == other.name
139 139
140 140 def __ne__(self, other):
141 141 return not self == other
142 142
143 143 def __hash__(self):
144 144 return hash(self.name)
145 145
146 146 allformatvariant = []
147 147
148 148 def registerformatvariant(cls):
149 149 allformatvariant.append(cls)
150 150 return cls
151 151
152 152 class formatvariant(improvement):
153 153 """an improvement subclass dedicated to repository format"""
154 154 type = deficiency
155 155 ### The following attributes should be defined for each class:
156 156
157 157 # machine-readable string uniquely identifying this improvement. it will be
158 158 # mapped to an action later in the upgrade process.
159 159 name = None
160 160
161 161 # message intended for humans explaining the improvement in more detail,
162 162 # including the implications of it. For ``deficiency`` types, it should be
163 163 # worded in the present tense.
164 164 description = None
165 165
166 166 # message intended for humans explaining what an upgrade addressing this
167 167 # issue will do. It should be worded in the future tense.
168 168 upgrademessage = None
169 169
170 170 # value of current Mercurial default for new repository
171 171 default = None
172 172
173 173 def __init__(self):
174 174 raise NotImplementedError()
175 175
176 176 @staticmethod
177 177 def fromrepo(repo):
178 178 """current value of the variant in the repository"""
179 179 raise NotImplementedError()
180 180
181 181 @staticmethod
182 182 def fromconfig(repo):
183 183 """current value of the variant in the configuration"""
184 184 raise NotImplementedError()
185 185
186 186 class requirementformatvariant(formatvariant):
187 187 """formatvariant based on a 'requirement' name.
188 188
189 189 Many format variants are controlled by a 'requirement'. We define a small
190 190 subclass to factor out the common code.
191 191 """
192 192
193 193 # the requirement that controls this format variant
194 194 _requirement = None
195 195
196 196 @staticmethod
197 197 def _newreporequirements(repo):
198 198 return localrepo.newreporequirements(repo)
199 199
200 200 @classmethod
201 201 def fromrepo(cls, repo):
202 202 assert cls._requirement is not None
203 203 return cls._requirement in repo.requirements
204 204
205 205 @classmethod
206 206 def fromconfig(cls, repo):
207 207 assert cls._requirement is not None
208 208 return cls._requirement in cls._newreporequirements(repo)
209 209
210 210 @registerformatvariant
211 211 class fncache(requirementformatvariant):
212 212 name = 'fncache'
213 213
214 214 _requirement = 'fncache'
215 215
216 216 default = True
217 217
218 218 description = _('long and reserved filenames may not work correctly; '
219 219 'repository performance is sub-optimal')
220 220
221 221 upgrademessage = _('repository will be more resilient to storing '
222 222 'certain paths and performance of certain '
223 223 'operations should be improved')
224 224
225 225 @registerformatvariant
226 226 class dotencode(requirementformatvariant):
227 227 name = 'dotencode'
228 228
229 229 _requirement = 'dotencode'
230 230
231 231 default = True
232 232
233 233 description = _('storage of filenames beginning with a period or '
234 234 'space may not work correctly')
235 235
236 236 upgrademessage = _('repository will be better able to store files '
237 237 'beginning with a space or period')
238 238
239 239 @registerformatvariant
240 240 class generaldelta(requirementformatvariant):
241 241 name = 'generaldelta'
242 242
243 243 _requirement = 'generaldelta'
244 244
245 245 default = True
246 246
247 247 description = _('deltas within internal storage are unable to '
248 248 'choose optimal revisions; repository is larger and '
249 249 'slower than it could be; interaction with other '
250 250 'repositories may require extra network and CPU '
251 251 'resources, making "hg push" and "hg pull" slower')
252 252
253 253 upgrademessage = _('repository storage will be able to create '
254 254 'optimal deltas; new repository data will be '
255 255 'smaller and read times should decrease; '
256 256 'interacting with other repositories using this '
257 257 'storage model should require less network and '
258 258 'CPU resources, making "hg push" and "hg pull" '
259 259 'faster')
260 260
261 261 @registerformatvariant
262 262 class removecldeltachain(formatvariant):
263 263 name = 'plain-cl-delta'
264 264
265 265 default = True
266 266
267 267 description = _('changelog storage is using deltas instead of '
268 268 'raw entries; changelog reading and any '
269 269 'operation relying on changelog data are slower '
270 270 'than they could be')
271 271
272 272 upgrademessage = _('changelog storage will be reformatted to '
273 273 'store raw entries; changelog reading will be '
274 274 'faster; changelog size may be reduced')
275 275
276 276 @staticmethod
277 277 def fromrepo(repo):
278 278 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
279 279 # changelogs with deltas.
280 280 cl = repo.changelog
281 281 chainbase = cl.chainbase
282 282 return all(rev == chainbase(rev) for rev in cl)
283 283
284 284 @staticmethod
285 285 def fromconfig(repo):
286 286 return True
287 287
288 288 @registerformatvariant
289 289 class compressionengine(formatvariant):
290 290 name = 'compression'
291 291 default = 'zlib'
292 292
293 293 description = _('Compression algorithm used to compress data. '
294 294 'Some engines are faster than others')
295 295
296 296 upgrademessage = _('revlog content will be recompressed with the new '
297 297 'algorithm.')
298 298
299 299 @classmethod
300 300 def fromrepo(cls, repo):
301 301 for req in repo.requirements:
302 302 if req.startswith('exp-compression-'):
303 303 return req.split('-', 2)[2]
304 304 return 'zlib'
305 305
306 306 @classmethod
307 307 def fromconfig(cls, repo):
308 308 return repo.ui.config('experimental', 'format.compression')
309 309
310 310 def finddeficiencies(repo):
311 311 """returns a list of deficiencies that the repo suffer from"""
312 312 deficiencies = []
313 313
314 314 # We could detect lack of revlogv1 and store here, but they were added
315 315 # in 0.9.2 and we don't support upgrading repos without these
316 316 # requirements, so let's not bother.
317 317
318 318 for fv in allformatvariant:
319 319 if not fv.fromrepo(repo):
320 320 deficiencies.append(fv)
321 321
322 322 return deficiencies
323 323
324 324 def findoptimizations(repo):
325 325 """Determine optimisation that could be used during upgrade"""
326 326 # These are unconditionally added. There is logic later that figures out
327 327 # which ones to apply.
328 328 optimizations = []
329 329
330 330 optimizations.append(improvement(
331 331 name='redeltaparent',
332 332 type=optimisation,
333 333 description=_('deltas within internal storage will be recalculated to '
334 334 'choose an optimal base revision where this was not '
335 335 'already done; the size of the repository may shrink and '
336 336 'various operations may become faster; the first time '
337 337 'this optimization is performed could slow down upgrade '
338 338 'execution considerably; subsequent invocations should '
339 339 'not run noticeably slower'),
340 340 upgrademessage=_('deltas within internal storage will choose a new '
341 341 'base revision if needed')))
342 342
343 343 optimizations.append(improvement(
344 344 name='redeltamultibase',
345 345 type=optimisation,
346 346 description=_('deltas within internal storage will be recalculated '
347 347 'against multiple base revisions and the smallest '
348 348 'difference will be used; the size of the repository may '
349 349 'shrink significantly when there are many merges; this '
350 350 'optimization will slow down execution in proportion to '
351 351 'the number of merges in the repository and the number '
352 352 'of files in the repository; this slowdown should not '
353 353 'be significant unless there are tens of thousands of '
354 354 'files and thousands of merges'),
355 355 upgrademessage=_('deltas within internal storage will choose an '
356 356 'optimal delta by computing deltas against multiple '
357 357 'parents; may slow down execution time '
358 358 'significantly')))
359 359
360 360 optimizations.append(improvement(
361 361 name='redeltaall',
362 362 type=optimisation,
363 363 description=_('deltas within internal storage will always be '
364 364 'recalculated without reusing prior deltas; this will '
365 365 'likely make execution run several times slower; this '
366 366 'optimization is typically not needed'),
367 367 upgrademessage=_('deltas within internal storage will be fully '
368 368 'recomputed; this will likely drastically slow down '
369 369 'execution time')))
370 370
371 371 optimizations.append(improvement(
372 372 name='redeltafulladd',
373 373 type=optimisation,
374 374 description=_('every revision will be re-added as if it were new '
375 375 'content. It will go through the full storage '
376 376 'mechanism giving extensions a chance to process it '
377 377 '(e.g. lfs). This is similar to "redeltaall" but even '
378 378 'slower since more logic is involved.'),
379 379 upgrademessage=_('each revision will be added as new content to the '
380 380 'internal storage; this will likely drastically slow '
381 381 'down execution time, but some extensions might need '
382 382 'it')))
383 383
384 384 return optimizations
385 385
386 386 def determineactions(repo, deficiencies, sourcereqs, destreqs):
387 387 """Determine upgrade actions that will be performed.
388 388
389 389 Given a list of improvements as returned by ``finddeficiencies`` and
390 390 ``findoptimizations``, determine the list of upgrade actions that
391 391 will be performed.
392 392
393 393 The role of this function is to filter improvements if needed, apply
394 394 recommended optimizations from the improvements list that make sense,
395 395 etc.
396 396
397 397 Returns a list of action names.
398 398 """
399 399 newactions = []
400 400
401 401 knownreqs = supporteddestrequirements(repo)
402 402
403 403 for d in deficiencies:
404 404 name = d.name
405 405
406 406 # If the action is a requirement that doesn't show up in the
407 407 # destination requirements, prune the action.
408 408 if name in knownreqs and name not in destreqs:
409 409 continue
410 410
411 411 newactions.append(d)
412 412
413 413 # FUTURE consider adding some optimizations here for certain transitions.
414 414 # e.g. adding generaldelta could schedule parent redeltas.
415 415
416 416 return newactions
417 417
418 418 def _revlogfrompath(repo, path):
419 419 """Obtain a revlog from a repo path.
420 420
421 421 An instance of the appropriate class is returned.
422 422 """
423 423 if path == '00changelog.i':
424 424 return changelog.changelog(repo.svfs)
425 425 elif path.endswith('00manifest.i'):
426 426 mandir = path[:-len('00manifest.i')]
427 427 return manifest.manifestrevlog(repo.svfs, dir=mandir)
428 428 else:
429 429 # reverse of "/".join(("data", path + ".i"))
430 430 return filelog.filelog(repo.svfs, path[5:-2])
431 431
432 432 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
433 433 """Copy revlogs between 2 repos."""
434 434 revcount = 0
435 435 srcsize = 0
436 436 srcrawsize = 0
437 437 dstsize = 0
438 438 fcount = 0
439 439 frevcount = 0
440 440 fsrcsize = 0
441 441 frawsize = 0
442 442 fdstsize = 0
443 443 mcount = 0
444 444 mrevcount = 0
445 445 msrcsize = 0
446 446 mrawsize = 0
447 447 mdstsize = 0
448 448 crevcount = 0
449 449 csrcsize = 0
450 450 crawsize = 0
451 451 cdstsize = 0
452 452
453 453 # Perform a pass to collect metadata. This validates we can open all
454 454 # source files and allows a unified progress bar to be displayed.
455 455 for unencoded, encoded, size in srcrepo.store.walk():
456 456 if unencoded.endswith('.d'):
457 457 continue
458 458
459 459 rl = _revlogfrompath(srcrepo, unencoded)
460 460 revcount += len(rl)
461 461
462 462 datasize = 0
463 463 rawsize = 0
464 464 idx = rl.index
465 465 for rev in rl:
466 466 e = idx[rev]
467 467 datasize += e[1]
468 468 rawsize += e[2]
469 469
470 470 srcsize += datasize
471 471 srcrawsize += rawsize
472 472
473 473 # This is for the separate progress bars.
474 474 if isinstance(rl, changelog.changelog):
475 475 crevcount += len(rl)
476 476 csrcsize += datasize
477 477 crawsize += rawsize
478 478 elif isinstance(rl, manifest.manifestrevlog):
479 479 mcount += 1
480 480 mrevcount += len(rl)
481 481 msrcsize += datasize
482 482 mrawsize += rawsize
483 elif isinstance(rl, revlog.revlog):
483 elif isinstance(rl, filelog.filelog):
484 484 fcount += 1
485 485 frevcount += len(rl)
486 486 fsrcsize += datasize
487 487 frawsize += rawsize
488 else:
489 raise error.ProgrammingError('unknown revlog type')
488 490
489 491 if not revcount:
490 492 return
491 493
492 494 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
493 495 '%d in changelog)\n') %
494 496 (revcount, frevcount, mrevcount, crevcount))
495 497 ui.write(_('migrating %s in store; %s tracked data\n') % (
496 498 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
497 499
498 500 # Used to keep track of progress.
499 501 progress = []
500 502 def oncopiedrevision(rl, rev, node):
501 503 progress[1] += 1
502 504 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
503 505
504 506 # Do the actual copying.
505 507 # FUTURE this operation can be farmed off to worker processes.
506 508 seen = set()
507 509 for unencoded, encoded, size in srcrepo.store.walk():
508 510 if unencoded.endswith('.d'):
509 511 continue
510 512
511 513 oldrl = _revlogfrompath(srcrepo, unencoded)
512 514 newrl = _revlogfrompath(dstrepo, unencoded)
513 515
514 516 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
515 517 ui.write(_('finished migrating %d manifest revisions across %d '
516 518 'manifests; change in size: %s\n') %
517 519 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
518 520
519 521 ui.write(_('migrating changelog containing %d revisions '
520 522 '(%s in store; %s tracked data)\n') %
521 523 (crevcount, util.bytecount(csrcsize),
522 524 util.bytecount(crawsize)))
523 525 seen.add('c')
524 526 progress[:] = [_('changelog revisions'), 0, crevcount]
525 527 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
526 528 ui.write(_('finished migrating %d filelog revisions across %d '
527 529 'filelogs; change in size: %s\n') %
528 530 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
529 531
530 532 ui.write(_('migrating %d manifests containing %d revisions '
531 533 '(%s in store; %s tracked data)\n') %
532 534 (mcount, mrevcount, util.bytecount(msrcsize),
533 535 util.bytecount(mrawsize)))
534 536 seen.add('m')
535 537 progress[:] = [_('manifest revisions'), 0, mrevcount]
536 538 elif 'f' not in seen:
537 539 ui.write(_('migrating %d filelogs containing %d revisions '
538 540 '(%s in store; %s tracked data)\n') %
539 541 (fcount, frevcount, util.bytecount(fsrcsize),
540 542 util.bytecount(frawsize)))
541 543 seen.add('f')
542 544 progress[:] = [_('file revisions'), 0, frevcount]
543 545
544 546 ui.progress(progress[0], progress[1], total=progress[2])
545 547
546 548 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
547 549 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
548 550 deltareuse=deltareuse,
549 551 aggressivemergedeltas=aggressivemergedeltas)
550 552
551 553 datasize = 0
552 554 idx = newrl.index
553 555 for rev in newrl:
554 556 datasize += idx[rev][1]
555 557
556 558 dstsize += datasize
557 559
558 560 if isinstance(newrl, changelog.changelog):
559 561 cdstsize += datasize
560 562 elif isinstance(newrl, manifest.manifestrevlog):
561 563 mdstsize += datasize
562 564 else:
563 565 fdstsize += datasize
564 566
565 567 ui.progress(progress[0], None)
566 568
567 569 ui.write(_('finished migrating %d changelog revisions; change in size: '
568 570 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
569 571
570 572 ui.write(_('finished migrating %d total revisions; total change in store '
571 573 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
572 574
573 575 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
574 576 """Determine whether to copy a store file during upgrade.
575 577
576 578 This function is called when migrating store files from ``srcrepo`` to
577 579 ``dstrepo`` as part of upgrading a repository.
578 580
579 581 Args:
580 582 srcrepo: repo we are copying from
581 583 dstrepo: repo we are copying to
582 584 requirements: set of requirements for ``dstrepo``
583 585 path: store file being examined
584 586 mode: the ``ST_MODE`` file type of ``path``
585 587 st: ``stat`` data structure for ``path``
586 588
587 589 Function should return ``True`` if the file is to be copied.
588 590 """
589 591 # Skip revlogs.
590 592 if path.endswith(('.i', '.d')):
591 593 return False
592 594 # Skip transaction related files.
593 595 if path.startswith('undo'):
594 596 return False
595 597 # Only copy regular files.
596 598 if mode != stat.S_IFREG:
597 599 return False
598 600 # Skip other skipped files.
599 601 if path in ('lock', 'fncache'):
600 602 return False
601 603
602 604 return True
603 605
604 606 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
605 607 """Hook point for extensions to perform additional actions during upgrade.
606 608
607 609 This function is called after revlogs and store files have been copied but
608 610 before the new store is swapped into the original location.
609 611 """
610 612
611 613 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
612 614 """Do the low-level work of upgrading a repository.
613 615
614 616 The upgrade is effectively performed as a copy between a source
615 617 repository and a temporary destination repository.
616 618
617 619 The source repository is unmodified for as long as possible so the
618 620 upgrade can abort at any time without causing loss of service for
619 621 readers and without corrupting the source repository.
620 622 """
621 623 assert srcrepo.currentwlock()
622 624 assert dstrepo.currentwlock()
623 625
624 626 ui.write(_('(it is safe to interrupt this process any time before '
625 627 'data migration completes)\n'))
626 628
627 629 if 'redeltaall' in actions:
628 630 deltareuse = revlog.revlog.DELTAREUSENEVER
629 631 elif 'redeltaparent' in actions:
630 632 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
631 633 elif 'redeltamultibase' in actions:
632 634 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
633 635 elif 'redeltafulladd' in actions:
634 636 deltareuse = revlog.revlog.DELTAREUSEFULLADD
635 637 else:
636 638 deltareuse = revlog.revlog.DELTAREUSEALWAYS
637 639
638 640 with dstrepo.transaction('upgrade') as tr:
639 641 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
640 642 'redeltamultibase' in actions)
641 643
642 644 # Now copy other files in the store directory.
643 645 # The sorted() makes execution deterministic.
644 646 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
645 647 if not _filterstorefile(srcrepo, dstrepo, requirements,
646 648 p, kind, st):
647 649 continue
648 650
649 651 srcrepo.ui.write(_('copying %s\n') % p)
650 652 src = srcrepo.store.rawvfs.join(p)
651 653 dst = dstrepo.store.rawvfs.join(p)
652 654 util.copyfile(src, dst, copystat=True)
653 655
654 656 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
655 657
656 658 ui.write(_('data fully migrated to temporary repository\n'))
657 659
658 660 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
659 661 backupvfs = vfsmod.vfs(backuppath)
660 662
661 663 # Make a backup of requires file first, as it is the first to be modified.
662 664 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
663 665
664 666 # We install an arbitrary requirement that clients must not support
665 667 # as a mechanism to lock out new clients during the data swap. This is
666 668 # better than allowing a client to continue while the repository is in
667 669 # an inconsistent state.
668 670 ui.write(_('marking source repository as being upgraded; clients will be '
669 671 'unable to read from repository\n'))
670 672 scmutil.writerequires(srcrepo.vfs,
671 673 srcrepo.requirements | {'upgradeinprogress'})
672 674
673 675 ui.write(_('starting in-place swap of repository data\n'))
674 676 ui.write(_('replaced files will be backed up at %s\n') %
675 677 backuppath)
676 678
677 679 # Now swap in the new store directory. Doing it as a rename should make
678 680 # the operation nearly instantaneous and atomic (at least in well-behaved
679 681 # environments).
680 682 ui.write(_('replacing store...\n'))
681 683 tstart = util.timer()
682 684 util.rename(srcrepo.spath, backupvfs.join('store'))
683 685 util.rename(dstrepo.spath, srcrepo.spath)
684 686 elapsed = util.timer() - tstart
685 687 ui.write(_('store replacement complete; repository was inconsistent for '
686 688 '%0.1fs\n') % elapsed)
687 689
688 690 # We first write the requirements file. Any new requirements will lock
689 691 # out legacy clients.
690 692 ui.write(_('finalizing requirements file and making repository readable '
691 693 'again\n'))
692 694 scmutil.writerequires(srcrepo.vfs, requirements)
693 695
694 696 # The lock file from the old store won't be removed because nothing has a
695 697 # reference to its new location. So clean it up manually. Alternatively, we
696 698 # could update srcrepo.svfs and other variables to point to the new
697 699 # location. This is simpler.
698 700 backupvfs.unlink('store/lock')
699 701
700 702 return backuppath
701 703
702 704 def upgraderepo(ui, repo, run=False, optimize=None):
703 705 """Upgrade a repository in place."""
704 706 optimize = set(optimize or [])
705 707 repo = repo.unfiltered()
706 708
707 709 # Ensure the repository can be upgraded.
708 710 missingreqs = requiredsourcerequirements(repo) - repo.requirements
709 711 if missingreqs:
710 712 raise error.Abort(_('cannot upgrade repository; requirement '
711 713 'missing: %s') % _(', ').join(sorted(missingreqs)))
712 714
713 715 blockedreqs = blocksourcerequirements(repo) & repo.requirements
714 716 if blockedreqs:
715 717 raise error.Abort(_('cannot upgrade repository; unsupported source '
716 718 'requirement: %s') %
717 719 _(', ').join(sorted(blockedreqs)))
718 720
719 721 # FUTURE there is potentially a need to control the wanted requirements via
720 722 # command arguments or via an extension hook point.
721 723 newreqs = localrepo.newreporequirements(repo)
722 724 newreqs.update(preservedrequirements(repo))
723 725
724 726 noremovereqs = (repo.requirements - newreqs -
725 727 supportremovedrequirements(repo))
726 728 if noremovereqs:
727 729 raise error.Abort(_('cannot upgrade repository; requirement would be '
728 730 'removed: %s') % _(', ').join(sorted(noremovereqs)))
729 731
730 732 noaddreqs = (newreqs - repo.requirements -
731 733 allowednewrequirements(repo))
732 734 if noaddreqs:
733 735 raise error.Abort(_('cannot upgrade repository; do not support adding '
734 736 'requirement: %s') %
735 737 _(', ').join(sorted(noaddreqs)))
736 738
737 739 unsupportedreqs = newreqs - supporteddestrequirements(repo)
738 740 if unsupportedreqs:
739 741 raise error.Abort(_('cannot upgrade repository; do not support '
740 742 'destination requirement: %s') %
741 743 _(', ').join(sorted(unsupportedreqs)))
742 744
743 745 # Find and validate all improvements that can be made.
744 746 alloptimizations = findoptimizations(repo)
745 747
746 748 # Apply and validate arguments.
747 749 optimizations = []
748 750 for o in alloptimizations:
749 751 if o.name in optimize:
750 752 optimizations.append(o)
751 753 optimize.discard(o.name)
752 754
753 755 if optimize: # anything left is unknown
754 756 raise error.Abort(_('unknown optimization action requested: %s') %
755 757 ', '.join(sorted(optimize)),
756 758 hint=_('run without arguments to see valid '
757 759 'optimizations'))
758 760
759 761 deficiencies = finddeficiencies(repo)
760 762 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
761 763 actions.extend(o for o in sorted(optimizations)
762 764 # determineactions could have added optimisations
763 765 if o not in actions)
764 766
765 767 def printrequirements():
766 768 ui.write(_('requirements\n'))
767 769 ui.write(_(' preserved: %s\n') %
768 770 _(', ').join(sorted(newreqs & repo.requirements)))
769 771
770 772 if repo.requirements - newreqs:
771 773 ui.write(_(' removed: %s\n') %
772 774 _(', ').join(sorted(repo.requirements - newreqs)))
773 775
774 776 if newreqs - repo.requirements:
775 777 ui.write(_(' added: %s\n') %
776 778 _(', ').join(sorted(newreqs - repo.requirements)))
777 779
778 780 ui.write('\n')
779 781
780 782 def printupgradeactions():
781 783 for a in actions:
782 784 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
783 785
784 786 if not run:
785 787 fromconfig = []
786 788 onlydefault = []
787 789
788 790 for d in deficiencies:
789 791 if d.fromconfig(repo):
790 792 fromconfig.append(d)
791 793 elif d.default:
792 794 onlydefault.append(d)
793 795
794 796 if fromconfig or onlydefault:
795 797
796 798 if fromconfig:
797 799 ui.write(_('repository lacks features recommended by '
798 800 'current config options:\n\n'))
799 801 for i in fromconfig:
800 802 ui.write('%s\n %s\n\n' % (i.name, i.description))
801 803
802 804 if onlydefault:
803 805 ui.write(_('repository lacks features used by the default '
804 806 'config options:\n\n'))
805 807 for i in onlydefault:
806 808 ui.write('%s\n %s\n\n' % (i.name, i.description))
807 809
808 810 ui.write('\n')
809 811 else:
810 812 ui.write(_('(no feature deficiencies found in existing '
811 813 'repository)\n'))
812 814
813 815 ui.write(_('performing an upgrade with "--run" will make the following '
814 816 'changes:\n\n'))
815 817
816 818 printrequirements()
817 819 printupgradeactions()
818 820
819 821 unusedoptimize = [i for i in alloptimizations if i not in actions]
820 822
821 823 if unusedoptimize:
822 824 ui.write(_('additional optimizations are available by specifying '
823 825 '"--optimize <name>":\n\n'))
824 826 for i in unusedoptimize:
825 827 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
826 828 return
827 829
828 830 # Else we're in the run=true case.
829 831 ui.write(_('upgrade will perform the following actions:\n\n'))
830 832 printrequirements()
831 833 printupgradeactions()
832 834
833 835 upgradeactions = [a.name for a in actions]
834 836
835 837 ui.write(_('beginning upgrade...\n'))
836 838 with repo.wlock(), repo.lock():
837 839 ui.write(_('repository locked and read-only\n'))
838 840 # Our strategy for upgrading the repository is to create a new,
839 841 # temporary repository, write data to it, then do a swap of the
840 842 # data. There are less heavyweight ways to do this, but it is easier
841 843 # to create a new repo object than to instantiate all the components
842 844 # (like the store) separately.
843 845 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
844 846 backuppath = None
845 847 try:
846 848 ui.write(_('creating temporary repository to stage migrated '
847 849 'data: %s\n') % tmppath)
848 850
849 851 # clone ui without using ui.copy because repo.ui is protected
850 852 repoui = repo.ui.__class__(repo.ui)
851 853 dstrepo = hg.repository(repoui, path=tmppath, create=True)
852 854
853 855 with dstrepo.wlock(), dstrepo.lock():
854 856 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
855 857 upgradeactions)
856 858
857 859 finally:
858 860 ui.write(_('removing temporary repository %s\n') % tmppath)
859 861 repo.vfs.rmtree(tmppath, forcibly=True)
860 862
861 863 if backuppath:
862 864 ui.warn(_('copy of old repository backed up at %s\n') %
863 865 backuppath)
864 866 ui.warn(_('the old repository will not be deleted; remove '
865 867 'it to free up disk space once the upgraded '
866 868 'repository is verified\n'))
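The docstring of supporteddestrequirements() notes that extensions should monkeypatch it to declare their own requirements. As a minimal, hypothetical sketch of how an extension of this era might do so, assuming the standard extensions.wrapfunction mechanism and an invented 'exp-myfeature' requirement name used purely for illustration:

# Hypothetical extension snippet; 'exp-myfeature' is a made-up requirement
# name and not a real Mercurial feature.
from mercurial import extensions, upgrade

def _supporteddest(orig, repo):
    # Allow the requirement to appear in upgraded destinations.
    reqs = orig(repo)
    reqs.add('exp-myfeature')
    return reqs

def _preserved(orig, repo):
    # Carry the requirement over from the source repository if present.
    reqs = orig(repo)
    if 'exp-myfeature' in repo.requirements:
        reqs.add('exp-myfeature')
    return reqs

def extsetup(ui):
    extensions.wrapfunction(upgrade, 'supporteddestrequirements',
                            _supporteddest)
    extensions.wrapfunction(upgrade, 'preservedrequirements', _preserved)

With these wrappers in place, upgraderepo() computes newreqs including the preserved requirement and accepts it because the wrapped supporteddestrequirements() reports it as supported in the destination.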