upgrade: drop the prefix to the '_finishdatamigration' function...
Pierre-Yves David
r31874:27ec6517 default
@@ -1,758 +1,758 @@
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import stat
12 12 import tempfile
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 changelog,
17 17 error,
18 18 manifest,
19 19 revlog,
20 20 scmutil,
21 21 util,
22 22 vfs as vfsmod,
23 23 )
24 24
25 25 def requiredsourcerequirements(repo):
26 26 """Obtain requirements required to be present to upgrade a repo.
27 27
28 28 An upgrade will not be allowed if the repository doesn't have the
29 29 requirements returned by this function.
30 30 """
31 31 return set([
32 32 # Introduced in Mercurial 0.9.2.
33 33 'revlogv1',
34 34 # Introduced in Mercurial 0.9.2.
35 35 'store',
36 36 ])
37 37
38 38 def blocksourcerequirements(repo):
39 39 """Obtain requirements that will prevent an upgrade from occurring.
40 40
41 41 An upgrade cannot be performed if the source repository contains a
42 42 requirement in the returned set.
43 43 """
44 44 return set([
45 45 # The upgrade code does not yet support these experimental features.
46 46 # This is an artificial limitation.
47 47 'manifestv2',
48 48 'treemanifest',
49 49 # This was a precursor to generaldelta and was never enabled by default.
50 50 # It should (hopefully) not exist in the wild.
51 51 'parentdelta',
52 52 # Upgrade should operate on the actual store, not the shared link.
53 53 'shared',
54 54 ])
55 55
56 56 def supportremovedrequirements(repo):
57 57 """Obtain requirements that can be removed during an upgrade.
58 58
59 59 If an upgrade were to create a repository that dropped a requirement,
60 60 the dropped requirement must appear in the returned set for the upgrade
61 61 to be allowed.
62 62 """
63 63 return set()
64 64
65 65 def supporteddestrequirements(repo):
66 66 """Obtain requirements that upgrade supports in the destination.
67 67
68 68 If the result of the upgrade would create requirements not in this set,
69 69 the upgrade is disallowed.
70 70
71 71 Extensions should monkeypatch this to add their custom requirements.
72 72 """
73 73 return set([
74 74 'dotencode',
75 75 'fncache',
76 76 'generaldelta',
77 77 'revlogv1',
78 78 'store',
79 79 ])
80 80
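The docstring above invites extensions to monkeypatch ``supporteddestrequirements``; a minimal sketch of what that could look like, assuming a hypothetical extension and requirement name (at this point in history these helpers live in mercurial/repair.py):

# hypothetical extension module
from mercurial import repair

_origsupported = repair.supporteddestrequirements

def supporteddestrequirements(repo):
    reqs = _origsupported(repo)
    reqs.add('exp-myextension-storage')  # hypothetical requirement name
    return reqs

repair.supporteddestrequirements = supporteddestrequirements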
81 81 def allowednewrequirements(repo):
82 82 """Obtain requirements that can be added to a repository during upgrade.
83 83
84 84 This is used to disallow proposed requirements from being added when
85 85 they weren't present before.
86 86
87 87 We use a list of allowed requirement additions instead of a list of known
88 88 bad additions because the whitelist approach is safer and will prevent
89 89 future, unknown requirements from accidentally being added.
90 90 """
91 91 return set([
92 92 'dotencode',
93 93 'fncache',
94 94 'generaldelta',
95 95 ])
96 96
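A rough sketch of how this whitelist is combined with the other requirement sets during validation, mirroring the checks performed in ``upgraderepo()`` further down (``repo`` and ``newreqs`` are assumed to already be in scope):

# requirements the upgrade would add on top of the current ones
added = newreqs - repo.requirements
# anything added that is not whitelisted aborts the upgrade
unexpected = added - allowednewrequirements(repo)
if unexpected:
    raise error.Abort(_('do not support adding requirement: %s')
                      % _(', ').join(sorted(unexpected)))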
97 97 deficiency = 'deficiency'
98 98 optimisation = 'optimization'
99 99
100 100 class improvement(object):
101 101 """Represents an improvement that can be made as part of an upgrade.
102 102
103 103 The following attributes are defined on each instance:
104 104
105 105 name
106 106 Machine-readable string uniquely identifying this improvement. It
107 107 will be mapped to an action later in the upgrade process.
108 108
109 109 type
110 110 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
111 111 problem. An optimization is an action (sometimes optional) that
112 112 can be taken to further improve the state of the repository.
113 113
114 114 description
115 115 Message intended for humans explaining the improvement in more detail,
116 116 including the implications of it. For ``deficiency`` types, should be
117 117 worded in the present tense. For ``optimisation`` types, should be
118 118 worded in the future tense.
119 119
120 120 upgrademessage
121 121 Message intended for humans explaining what an upgrade addressing this
122 122 issue will do. Should be worded in the future tense.
123 123
124 124 fromdefault (``deficiency`` types only)
125 125 Boolean indicating whether the current (deficient) state deviates
126 126 from Mercurial's default configuration.
127 127
128 128 fromconfig (``deficiency`` types only)
129 129 Boolean indicating whether the current (deficient) state deviates
130 130 from the current Mercurial configuration.
131 131 """
132 132 def __init__(self, name, type, description, upgrademessage, **kwargs):
133 133 self.name = name
134 134 self.type = type
135 135 self.description = description
136 136 self.upgrademessage = upgrademessage
137 137
138 138 for k, v in kwargs.items():
139 139 setattr(self, k, v)
140 140
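For illustration, a deficiency entry could be built like this; the values are shortened versions of the real entries created in ``findimprovements()`` below:

entry = improvement(
    name='fncache',
    type=deficiency,
    description=_('long and reserved filenames may not work correctly'),
    upgrademessage=_('repository will be more resilient to storing '
                     'certain paths'),
    fromdefault=True,
    fromconfig=True)
assert entry.fromdefault  # extra keyword arguments become attributes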
141 141 def findimprovements(repo):
142 142 """Determine improvements that can be made to the repo during upgrade.
143 143
144 144 Returns a list of ``improvement`` describing repository deficiencies
145 145 and optimizations.
146 146 """
147 147 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
148 148 from . import localrepo
149 149
150 150 newreporeqs = localrepo.newreporequirements(repo)
151 151
152 152 improvements = []
153 153
154 154 # We could detect lack of revlogv1 and store here, but they were added
155 155 # in 0.9.2 and we don't support upgrading repos without these
156 156 # requirements, so let's not bother.
157 157
158 158 if 'fncache' not in repo.requirements:
159 159 improvements.append(improvement(
160 160 name='fncache',
161 161 type=deficiency,
162 162 description=_('long and reserved filenames may not work correctly; '
163 163 'repository performance is sub-optimal'),
164 164 upgrademessage=_('repository will be more resilient to storing '
165 165 'certain paths and performance of certain '
166 166 'operations should be improved'),
167 167 fromdefault=True,
168 168 fromconfig='fncache' in newreporeqs))
169 169
170 170 if 'dotencode' not in repo.requirements:
171 171 improvements.append(improvement(
172 172 name='dotencode',
173 173 type=deficiency,
174 174 description=_('storage of filenames beginning with a period or '
175 175 'space may not work correctly'),
176 176 upgrademessage=_('repository will be better able to store files '
177 177 'beginning with a space or period'),
178 178 fromdefault=True,
179 179 fromconfig='dotencode' in newreporeqs))
180 180
181 181 if 'generaldelta' not in repo.requirements:
182 182 improvements.append(improvement(
183 183 name='generaldelta',
184 184 type=deficiency,
185 185 description=_('deltas within internal storage are unable to '
186 186 'choose optimal revisions; repository is larger and '
187 187 'slower than it could be; interaction with other '
188 188 'repositories may require extra network and CPU '
189 189 'resources, making "hg push" and "hg pull" slower'),
190 190 upgrademessage=_('repository storage will be able to create '
191 191 'optimal deltas; new repository data will be '
192 192 'smaller and read times should decrease; '
193 193 'interacting with other repositories using this '
194 194 'storage model should require less network and '
195 195 'CPU resources, making "hg push" and "hg pull" '
196 196 'faster'),
197 197 fromdefault=True,
198 198 fromconfig='generaldelta' in newreporeqs))
199 199
200 200 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
201 201 # changelogs with deltas.
202 202 cl = repo.changelog
203 203 for rev in cl:
204 204 chainbase = cl.chainbase(rev)
205 205 if chainbase != rev:
206 206 improvements.append(improvement(
207 207 name='removecldeltachain',
208 208 type=deficiency,
209 209 description=_('changelog storage is using deltas instead of '
210 210 'raw entries; changelog reading and any '
211 211 'operation relying on changelog data are slower '
212 212 'than they could be'),
213 213 upgrademessage=_('changelog storage will be reformatted to '
214 214 'store raw entries; changelog reading will be '
215 215 'faster; changelog size may be reduced'),
216 216 fromdefault=True,
217 217 fromconfig=True))
218 218 break
219 219
220 220 # Now for the optimizations.
221 221
222 222 # These are unconditionally added. There is logic later that figures out
223 223 # which ones to apply.
224 224
225 225 improvements.append(improvement(
226 226 name='redeltaparent',
227 227 type=optimisation,
228 228 description=_('deltas within internal storage will be recalculated to '
229 229 'choose an optimal base revision where this was not '
230 230 'already done; the size of the repository may shrink and '
231 231 'various operations may become faster; the first time '
232 232 'this optimization is performed could slow down upgrade '
233 233 'execution considerably; subsequent invocations should '
234 234 'not run noticeably slower'),
235 235 upgrademessage=_('deltas within internal storage will choose a new '
236 236 'base revision if needed')))
237 237
238 238 improvements.append(improvement(
239 239 name='redeltamultibase',
240 240 type=optimisation,
241 241 description=_('deltas within internal storage will be recalculated '
242 242 'against multiple base revisions and the smallest '
243 243 'difference will be used; the size of the repository may '
244 244 'shrink significantly when there are many merges; this '
245 245 'optimization will slow down execution in proportion to '
246 246 'the number of merges in the repository and the number '
247 247 'of files in the repository; this slowdown should not '
248 248 'be significant unless there are tens of thousands of '
249 249 'files and thousands of merges'),
250 250 upgrademessage=_('deltas within internal storage will choose an '
251 251 'optimal delta by computing deltas against multiple '
252 252 'parents; may slow down execution time '
253 253 'significantly')))
254 254
255 255 improvements.append(improvement(
256 256 name='redeltaall',
257 257 type=optimisation,
258 258 description=_('deltas within internal storage will always be '
259 259 'recalculated without reusing prior deltas; this will '
260 260 'likely make execution run several times slower; this '
261 261 'optimization is typically not needed'),
262 262 upgrademessage=_('deltas within internal storage will be fully '
263 263 'recomputed; this will likely drastically slow down '
264 264 'execution time')))
265 265
266 266 return improvements
267 267
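A quick sketch of inspecting the result, assuming ``repo`` is an unfiltered local repository:

for i in findimprovements(repo):
    kind = 'deficiency' if i.type == deficiency else 'optimization'
    repo.ui.write('%s (%s): %s\n' % (i.name, kind, i.upgrademessage))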
268 268 def determineactions(repo, improvements, sourcereqs, destreqs,
269 269 optimize):
270 270 """Determine upgrade actions that will be performed.
271 271
272 272 Given a list of improvements as returned by ``findimprovements``,
273 273 determine the list of upgrade actions that will be performed.
274 274
275 275 The role of this function is to filter improvements if needed, apply
276 276 recommended optimizations from the improvements list that make sense,
277 277 etc.
278 278
279 279 Returns a list of action names.
280 280 """
281 281 newactions = []
282 282
283 283 knownreqs = supporteddestrequirements(repo)
284 284
285 285 for i in improvements:
286 286 name = i.name
287 287
288 288 # If the action is a requirement that doesn't show up in the
289 289 # destination requirements, prune the action.
290 290 if name in knownreqs and name not in destreqs:
291 291 continue
292 292
293 293 if i.type == deficiency:
294 294 newactions.append(name)
295 295
296 296 newactions.extend(o for o in sorted(optimize) if o not in newactions)
297 297
298 298 # FUTURE consider adding some optimizations here for certain transitions.
299 299 # e.g. adding generaldelta could schedule parent redeltas.
300 300
301 301 return newactions
302 302
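A sketch of deriving the action list the way ``upgraderepo()`` does, assuming ``repo`` and ``newreqs`` are already computed; the optimization name comes from the list built above:

improvements = findimprovements(repo)
actions = determineactions(repo, improvements, repo.requirements,
                           newreqs, optimize=set(['redeltaparent']))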
303 303 def _revlogfrompath(repo, path):
304 304 """Obtain a revlog from a repo path.
305 305
306 306 An instance of the appropriate class is returned.
307 307 """
308 308 if path == '00changelog.i':
309 309 return changelog.changelog(repo.svfs)
310 310 elif path.endswith('00manifest.i'):
311 311 mandir = path[:-len('00manifest.i')]
312 312 return manifest.manifestrevlog(repo.svfs, dir=mandir)
313 313 else:
314 314 # Filelogs don't do anything special with settings. So we can use a
315 315 # vanilla revlog.
316 316 return revlog.revlog(repo.svfs, path)
317 317
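The mapping performed above, spelled out for a few representative store paths (``repo`` assumed; the filelog path is illustrative):

assert isinstance(_revlogfrompath(repo, '00changelog.i'),
                  changelog.changelog)
assert isinstance(_revlogfrompath(repo, '00manifest.i'),
                  manifest.manifestrevlog)
assert isinstance(_revlogfrompath(repo, 'data/foo.txt.i'), revlog.revlog)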
318 318 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
319 319 """Copy revlogs between 2 repos."""
320 320 revcount = 0
321 321 srcsize = 0
322 322 srcrawsize = 0
323 323 dstsize = 0
324 324 fcount = 0
325 325 frevcount = 0
326 326 fsrcsize = 0
327 327 frawsize = 0
328 328 fdstsize = 0
329 329 mcount = 0
330 330 mrevcount = 0
331 331 msrcsize = 0
332 332 mrawsize = 0
333 333 mdstsize = 0
334 334 crevcount = 0
335 335 csrcsize = 0
336 336 crawsize = 0
337 337 cdstsize = 0
338 338
339 339 # Perform a pass to collect metadata. This validates we can open all
340 340 # source files and allows a unified progress bar to be displayed.
341 341 for unencoded, encoded, size in srcrepo.store.walk():
342 342 if unencoded.endswith('.d'):
343 343 continue
344 344
345 345 rl = _revlogfrompath(srcrepo, unencoded)
346 346 revcount += len(rl)
347 347
348 348 datasize = 0
349 349 rawsize = 0
350 350 idx = rl.index
351 351 for rev in rl:
352 352 e = idx[rev]
353 353 datasize += e[1]
354 354 rawsize += e[2]
355 355
356 356 srcsize += datasize
357 357 srcrawsize += rawsize
358 358
359 359 # This is for the separate progress bars.
360 360 if isinstance(rl, changelog.changelog):
361 361 crevcount += len(rl)
362 362 csrcsize += datasize
363 363 crawsize += rawsize
364 364 elif isinstance(rl, manifest.manifestrevlog):
365 365 mcount += 1
366 366 mrevcount += len(rl)
367 367 msrcsize += datasize
368 368 mrawsize += rawsize
369 369 elif isinstance(rl, revlog.revlog):
370 370 fcount += 1
371 371 frevcount += len(rl)
372 372 fsrcsize += datasize
373 373 frawsize += rawsize
374 374
375 375 if not revcount:
376 376 return
377 377
378 378 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
379 379 '%d in changelog)\n') %
380 380 (revcount, frevcount, mrevcount, crevcount))
381 381 ui.write(_('migrating %s in store; %s tracked data\n') % (
382 382 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
383 383
384 384 # Used to keep track of progress.
385 385 progress = []
386 386 def oncopiedrevision(rl, rev, node):
387 387 progress[1] += 1
388 388 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
389 389
390 390 # Do the actual copying.
391 391 # FUTURE this operation can be farmed off to worker processes.
392 392 seen = set()
393 393 for unencoded, encoded, size in srcrepo.store.walk():
394 394 if unencoded.endswith('.d'):
395 395 continue
396 396
397 397 oldrl = _revlogfrompath(srcrepo, unencoded)
398 398 newrl = _revlogfrompath(dstrepo, unencoded)
399 399
400 400 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
401 401 ui.write(_('finished migrating %d manifest revisions across %d '
402 402 'manifests; change in size: %s\n') %
403 403 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
404 404
405 405 ui.write(_('migrating changelog containing %d revisions '
406 406 '(%s in store; %s tracked data)\n') %
407 407 (crevcount, util.bytecount(csrcsize),
408 408 util.bytecount(crawsize)))
409 409 seen.add('c')
410 410 progress[:] = [_('changelog revisions'), 0, crevcount]
411 411 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
412 412 ui.write(_('finished migrating %d filelog revisions across %d '
413 413 'filelogs; change in size: %s\n') %
414 414 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
415 415
416 416 ui.write(_('migrating %d manifests containing %d revisions '
417 417 '(%s in store; %s tracked data)\n') %
418 418 (mcount, mrevcount, util.bytecount(msrcsize),
419 419 util.bytecount(mrawsize)))
420 420 seen.add('m')
421 421 progress[:] = [_('manifest revisions'), 0, mrevcount]
422 422 elif 'f' not in seen:
423 423 ui.write(_('migrating %d filelogs containing %d revisions '
424 424 '(%s in store; %s tracked data)\n') %
425 425 (fcount, frevcount, util.bytecount(fsrcsize),
426 426 util.bytecount(frawsize)))
427 427 seen.add('f')
428 428 progress[:] = [_('file revisions'), 0, frevcount]
429 429
430 430 ui.progress(progress[0], progress[1], total=progress[2])
431 431
432 432 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
433 433 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
434 434 deltareuse=deltareuse,
435 435 aggressivemergedeltas=aggressivemergedeltas)
436 436
437 437 datasize = 0
438 438 idx = newrl.index
439 439 for rev in newrl:
440 440 datasize += idx[rev][1]
441 441
442 442 dstsize += datasize
443 443
444 444 if isinstance(newrl, changelog.changelog):
445 445 cdstsize += datasize
446 446 elif isinstance(newrl, manifest.manifestrevlog):
447 447 mdstsize += datasize
448 448 else:
449 449 fdstsize += datasize
450 450
451 451 ui.progress(progress[0], None)
452 452
453 453 ui.write(_('finished migrating %d changelog revisions; change in size: '
454 454 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
455 455
456 456 ui.write(_('finished migrating %d total revisions; total change in store '
457 457 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
458 458
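The per-revlog core of the copy loop above, reduced to a sketch for a single filelog; ``srcrepo``, ``dstrepo`` and an open destination transaction ``tr`` are assumed:

oldrl = _revlogfrompath(srcrepo, 'data/foo.txt.i')
newrl = _revlogfrompath(dstrepo, 'data/foo.txt.i')
oldrl.clone(tr, newrl, deltareuse=revlog.revlog.DELTAREUSEALWAYS,
            aggressivemergedeltas=False)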
459 459 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
460 460 """Determine whether to copy a store file during upgrade.
461 461
462 462 This function is called when migrating store files from ``srcrepo`` to
463 463 ``dstrepo`` as part of upgrading a repository.
464 464
465 465 Args:
466 466 srcrepo: repo we are copying from
467 467 dstrepo: repo we are copying to
468 468 requirements: set of requirements for ``dstrepo``
469 469 path: store file being examined
470 470 mode: the ``ST_MODE`` file type of ``path``
471 471 st: ``stat`` data structure for ``path``
472 472
473 473 Function should return ``True`` if the file is to be copied.
474 474 """
475 475 # Skip revlogs.
476 476 if path.endswith(('.i', '.d')):
477 477 return False
478 478 # Skip transaction related files.
479 479 if path.startswith('undo'):
480 480 return False
481 481 # Only copy regular files.
482 482 if mode != stat.S_IFREG:
483 483 return False
484 484 # Skip other skipped files.
485 485 if path in ('lock', 'fncache'):
486 486 return False
487 487
488 488 return True
489 489
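How the filter above behaves on a few representative store entries (``srcrepo``, ``dstrepo`` and ``st`` assumed; every path is treated as a regular file):

mode = stat.S_IFREG
assert not _filterstorefile(srcrepo, dstrepo, set(), 'data/foo.i',
                            mode, st)   # revlog: copied separately
assert not _filterstorefile(srcrepo, dstrepo, set(), 'undo.backupfiles',
                            mode, st)   # transaction leftover: skipped
assert _filterstorefile(srcrepo, dstrepo, set(), 'phaseroots',
                        mode, st)       # regular store file: copied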
490 def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
490 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
491 491 """Hook point for extensions to perform additional actions during upgrade.
492 492
493 493 This function is called after revlogs and store files have been copied but
494 494 before the new store is swapped into the original location.
495 495 """
496 496
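Since the hook point above is meant for extensions, here is a hedged sketch of one way to use it; the wrapper and the 'mycache.db' file name are hypothetical:

# in a hypothetical extension's uisetup()
from mercurial import extensions, repair, util

def finishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    orig(ui, srcrepo, dstrepo, requirements)
    ui.write('migrating extension cache\n')
    util.copyfile(srcrepo.store.vfs.join('mycache.db'),  # hypothetical file
                  dstrepo.store.vfs.join('mycache.db'))

extensions.wrapfunction(repair, '_finishdatamigration', finishdatamigration)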
497 497 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
498 498 """Do the low-level work of upgrading a repository.
499 499
500 500 The upgrade is effectively performed as a copy between a source
501 501 repository and a temporary destination repository.
502 502
503 503 The source repository is unmodified for as long as possible so the
504 504 upgrade can abort at any time without causing loss of service for
505 505 readers and without corrupting the source repository.
506 506 """
507 507 assert srcrepo.currentwlock()
508 508 assert dstrepo.currentwlock()
509 509
510 510 ui.write(_('(it is safe to interrupt this process any time before '
511 511 'data migration completes)\n'))
512 512
513 513 if 'redeltaall' in actions:
514 514 deltareuse = revlog.revlog.DELTAREUSENEVER
515 515 elif 'redeltaparent' in actions:
516 516 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
517 517 elif 'redeltamultibase' in actions:
518 518 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
519 519 else:
520 520 deltareuse = revlog.revlog.DELTAREUSEALWAYS
521 521
522 522 with dstrepo.transaction('upgrade') as tr:
523 523 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
524 524 'redeltamultibase' in actions)
525 525
526 526 # Now copy other files in the store directory.
527 527 for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
528 528 if not _filterstorefile(srcrepo, dstrepo, requirements,
529 529 p, kind, st):
530 530 continue
531 531
532 532 srcrepo.ui.write(_('copying %s\n') % p)
533 533 src = srcrepo.store.vfs.join(p)
534 534 dst = dstrepo.store.vfs.join(p)
535 535 util.copyfile(src, dst, copystat=True)
536 536
537 _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)
537 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
538 538
539 539 ui.write(_('data fully migrated to temporary repository\n'))
540 540
541 541 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
542 542 backupvfs = vfsmod.vfs(backuppath)
543 543
544 544 # Make a backup of requires file first, as it is the first to be modified.
545 545 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
546 546
547 547 # We install an arbitrary requirement that clients must not support
548 548 # as a mechanism to lock out new clients during the data swap. This is
549 549 # better than allowing a client to continue while the repository is in
550 550 # an inconsistent state.
551 551 ui.write(_('marking source repository as being upgraded; clients will be '
552 552 'unable to read from repository\n'))
553 553 scmutil.writerequires(srcrepo.vfs,
554 554 srcrepo.requirements | set(['upgradeinprogress']))
555 555
556 556 ui.write(_('starting in-place swap of repository data\n'))
557 557 ui.write(_('replaced files will be backed up at %s\n') %
558 558 backuppath)
559 559
560 560 # Now swap in the new store directory. Doing it as a rename should make
561 561 # the operation nearly instantaneous and atomic (at least in well-behaved
562 562 # environments).
563 563 ui.write(_('replacing store...\n'))
564 564 tstart = util.timer()
565 565 util.rename(srcrepo.spath, backupvfs.join('store'))
566 566 util.rename(dstrepo.spath, srcrepo.spath)
567 567 elapsed = util.timer() - tstart
568 568 ui.write(_('store replacement complete; repository was inconsistent for '
569 569 '%0.1fs\n') % elapsed)
570 570
571 571 # We first write the requirements file. Any new requirements will lock
572 572 # out legacy clients.
573 573 ui.write(_('finalizing requirements file and making repository readable '
574 574 'again\n'))
575 575 scmutil.writerequires(srcrepo.vfs, requirements)
576 576
577 577 # The lock file from the old store won't be removed because nothing has a
578 578 # reference to its new location. So clean it up manually. Alternatively, we
579 579 # could update srcrepo.svfs and other variables to point to the new
580 580 # location. This is simpler.
581 581 backupvfs.unlink('store/lock')
582 582
583 583 return backuppath
584 584
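The action-to-delta-reuse mapping chosen near the top of ``_upgraderepo()`` could equivalently be expressed as a small helper; this is only an illustrative restatement, not part of the module:

def _deltareusemode(actions):
    if 'redeltaall' in actions:
        return revlog.revlog.DELTAREUSENEVER
    if 'redeltaparent' in actions or 'redeltamultibase' in actions:
        return revlog.revlog.DELTAREUSESAMEREVS
    return revlog.revlog.DELTAREUSEALWAYS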
585 585 def upgraderepo(ui, repo, run=False, optimize=None):
586 586 """Upgrade a repository in place."""
587 587 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
588 588 from . import localrepo
589 589
590 590 optimize = set(optimize or [])
591 591 repo = repo.unfiltered()
592 592
593 593 # Ensure the repository can be upgraded.
594 594 missingreqs = requiredsourcerequirements(repo) - repo.requirements
595 595 if missingreqs:
596 596 raise error.Abort(_('cannot upgrade repository; requirement '
597 597 'missing: %s') % _(', ').join(sorted(missingreqs)))
598 598
599 599 blockedreqs = blocksourcerequirements(repo) & repo.requirements
600 600 if blockedreqs:
601 601 raise error.Abort(_('cannot upgrade repository; unsupported source '
602 602 'requirement: %s') %
603 603 _(', ').join(sorted(blockedreqs)))
604 604
605 605 # FUTURE there is potentially a need to control the wanted requirements via
606 606 # command arguments or via an extension hook point.
607 607 newreqs = localrepo.newreporequirements(repo)
608 608
609 609 noremovereqs = (repo.requirements - newreqs -
610 610 supportremovedrequirements(repo))
611 611 if noremovereqs:
612 612 raise error.Abort(_('cannot upgrade repository; requirement would be '
613 613 'removed: %s') % _(', ').join(sorted(noremovereqs)))
614 614
615 615 noaddreqs = (newreqs - repo.requirements -
616 616 allowednewrequirements(repo))
617 617 if noaddreqs:
618 618 raise error.Abort(_('cannot upgrade repository; do not support adding '
619 619 'requirement: %s') %
620 620 _(', ').join(sorted(noaddreqs)))
621 621
622 622 unsupportedreqs = newreqs - supporteddestrequirements(repo)
623 623 if unsupportedreqs:
624 624 raise error.Abort(_('cannot upgrade repository; do not support '
625 625 'destination requirement: %s') %
626 626 _(', ').join(sorted(unsupportedreqs)))
627 627
628 628 # Find and validate all improvements that can be made.
629 629 improvements = findimprovements(repo)
630 630 for i in improvements:
631 631 if i.type not in (deficiency, optimisation):
632 632 raise error.Abort(_('unexpected improvement type %s for %s') % (
633 633 i.type, i.name))
634 634
635 635 # Validate arguments.
636 636 unknownoptimize = optimize - set(i.name for i in improvements
637 637 if i.type == optimisation)
638 638 if unknownoptimize:
639 639 raise error.Abort(_('unknown optimization action requested: %s') %
640 640 ', '.join(sorted(unknownoptimize)),
641 641 hint=_('run without arguments to see valid '
642 642 'optimizations'))
643 643
644 644 actions = determineactions(repo, improvements, repo.requirements,
645 645 newreqs, optimize)
646 646
647 647 def printrequirements():
648 648 ui.write(_('requirements\n'))
649 649 ui.write(_(' preserved: %s\n') %
650 650 _(', ').join(sorted(newreqs & repo.requirements)))
651 651
652 652 if repo.requirements - newreqs:
653 653 ui.write(_(' removed: %s\n') %
654 654 _(', ').join(sorted(repo.requirements - newreqs)))
655 655
656 656 if newreqs - repo.requirements:
657 657 ui.write(_(' added: %s\n') %
658 658 _(', ').join(sorted(newreqs - repo.requirements)))
659 659
660 660 ui.write('\n')
661 661
662 662 def printupgradeactions():
663 663 for action in actions:
664 664 for i in improvements:
665 665 if i.name == action:
666 666 ui.write('%s\n %s\n\n' %
667 667 (i.name, i.upgrademessage))
668 668
669 669 if not run:
670 670 fromdefault = []
671 671 fromconfig = []
672 672 optimizations = []
673 673
674 674 for i in improvements:
675 675 assert i.type in (deficiency, optimisation)
676 676 if i.type == deficiency:
677 677 if i.fromdefault:
678 678 fromdefault.append(i)
679 679 if i.fromconfig:
680 680 fromconfig.append(i)
681 681 else:
682 682 optimizations.append(i)
683 683
684 684 if fromdefault or fromconfig:
685 685 fromconfignames = set(x.name for x in fromconfig)
686 686 onlydefault = [i for i in fromdefault
687 687 if i.name not in fromconfignames]
688 688
689 689 if fromconfig:
690 690 ui.write(_('repository lacks features recommended by '
691 691 'current config options:\n\n'))
692 692 for i in fromconfig:
693 693 ui.write('%s\n %s\n\n' % (i.name, i.description))
694 694
695 695 if onlydefault:
696 696 ui.write(_('repository lacks features used by the default '
697 697 'config options:\n\n'))
698 698 for i in onlydefault:
699 699 ui.write('%s\n %s\n\n' % (i.name, i.description))
700 700
701 701 ui.write('\n')
702 702 else:
703 703 ui.write(_('(no feature deficiencies found in existing '
704 704 'repository)\n'))
705 705
706 706 ui.write(_('performing an upgrade with "--run" will make the following '
707 707 'changes:\n\n'))
708 708
709 709 printrequirements()
710 710 printupgradeactions()
711 711
712 712 unusedoptimize = [i for i in improvements
713 713 if i.name not in actions and i.type == optimisation]
714 714 if unusedoptimize:
715 715 ui.write(_('additional optimizations are available by specifying '
716 716 '"--optimize <name>":\n\n'))
717 717 for i in unusedoptimize:
718 718 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
719 719 return
720 720
721 721 # Else we're in the run=true case.
722 722 ui.write(_('upgrade will perform the following actions:\n\n'))
723 723 printrequirements()
724 724 printupgradeactions()
725 725
726 726 ui.write(_('beginning upgrade...\n'))
727 727 with repo.wlock():
728 728 with repo.lock():
729 729 ui.write(_('repository locked and read-only\n'))
730 730 # Our strategy for upgrading the repository is to create a new,
731 731 # temporary repository, write data to it, then do a swap of the
731 731 # data. There are lighter-weight ways to do this, but it is easier
733 733 # to create a new repo object than to instantiate all the components
734 734 # (like the store) separately.
735 735 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
736 736 backuppath = None
737 737 try:
738 738 ui.write(_('creating temporary repository to stage migrated '
739 739 'data: %s\n') % tmppath)
740 740 dstrepo = localrepo.localrepository(repo.baseui,
741 741 path=tmppath,
742 742 create=True)
743 743
744 744 with dstrepo.wlock():
745 745 with dstrepo.lock():
746 746 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
747 747 actions)
748 748
749 749 finally:
750 750 ui.write(_('removing temporary repository %s\n') % tmppath)
751 751 repo.vfs.rmtree(tmppath, forcibly=True)
752 752
753 753 if backuppath:
754 754 ui.warn(_('copy of old repository backed up at %s\n') %
755 755 backuppath)
756 756 ui.warn(_('the old repository will not be deleted; remove '
757 757 'it to free up disk space once the upgraded '
758 758 'repository is verified\n'))
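For context, the entry point above is driven from the command layer; a hedged sketch of such a caller (in Mercurial this functionality is exposed as ``hg debugupgraderepo``):

def debugupgraderepo(ui, repo, run=False, optimize=None):
    """sketch of a command-level wrapper around upgraderepo()"""
    return upgraderepo(ui, repo, run=run, optimize=optimize)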