##// END OF EJS Templates
repair: clean up stale lock file from store backup...
Gregory Szorc -
r30781:f2c069bf default
parent child Browse files
Show More
@@ -1,1096 +1,1102
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import stat
14 14 import tempfile
15 15 import time
16 16
17 17 from .i18n import _
18 18 from .node import short
19 19 from . import (
20 20 bundle2,
21 21 changegroup,
22 22 changelog,
23 23 error,
24 24 exchange,
25 25 manifest,
26 26 obsolete,
27 27 revlog,
28 28 scmutil,
29 29 util,
30 30 )
31 31
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """Write a backup bundle for the given revision set.

    The bundle lands in the repository's 'strip-backup' directory; its
    filename embeds a short form of ``node``, a digest of all bundled
    nodes (for uniqueness) and ``suffix``.  Returns the bundle file name.
    """
    cgversion = changegroup.safeversion(repo)
    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)

    vfs = repo.vfs
    backupdir = "strip-backup"
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Hash every node in the bundled set so the filename is unique.
    hexnodes = sorted(ctx.hex() for ctx in repo.set('%ln::%ln', bases, heads))
    digest = hashlib.sha1(''.join(hexnodes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), digest[:8], suffix)

    # Choose the on-disk bundle format for the changegroup version in use.
    if cgversion != '01':
        bundletype = "HG20"
        comp = 'BZ' if compress else None
    else:
        comp = None
        bundletype = "HG10BZ" if compress else "HG10UN"
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)
60 60
def _collectfiles(repo, striprev):
    """Return a sorted list of the files touched by revs >= striprev."""
    touched = set()
    rev = striprev
    end = len(repo)
    while rev < end:
        touched.update(repo[rev].files())
        rev += 1
    return sorted(touched)
69 69
70 70 def _collectbrokencsets(repo, files, striprev):
71 71 """return the changesets which will be broken by the truncation"""
72 72 s = set()
73 73 def collectone(revlog):
74 74 _, brokenset = revlog.getstrippoint(striprev)
75 75 s.update([revlog.linkrev(r) for r in brokenset])
76 76
77 77 collectone(repo.manifestlog._revlog)
78 78 for fname in files:
79 79 collectone(repo.file(fname))
80 80
81 81 return s
82 82
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Remove the revisions in ``nodelist`` and their descendants from repo.

    Unless ``backup`` is falsy, a backup bundle of the stripped revisions
    is written under 'strip-backup' and its path is returned (otherwise
    None).  Revisions above the strip point that are *not* descendants of
    it are saved to a temporary bundle and re-applied after truncation.
    Bookmarks pointing into the stripped set are moved to a surviving
    parent.  Raises error.Abort if called from inside a transaction.
    """
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    # base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # Remember bookmarks that point into the stripped set so they can be
    # moved to newbmtarget after the strip completes.
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # Truncate every file the transaction touched back to its
            # recorded offset; a file truncated to zero is flagged as
            # removed in the store.
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        # Re-apply the revisions that were saved to the temporary bundle.
        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            # NOTE(review): if repo.lock() or repo.transaction() raised
            # above, tr (and possibly lock) is still None here, so
            # tr.release() would raise AttributeError and mask the original
            # error — confirm whether this path is reachable in practice.
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
255 255
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.

    No-ops (with a warning) on repositories lacking the 'fncache'
    requirement.  Holds the repo lock for the duration of the rebuild and
    only rewrites the fncache if its contents actually changed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        # (The membership probe is only for its side effect; the result is
        # deliberately unused.)
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        # Walk every changeset and collect the .i/.d store paths of every
        # file ever touched.
        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # Tree manifests keep per-directory manifest revlogs under
            # meta/; include those that exist on disk as well.
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        # Report the delta between the on-disk cache and the rebuilt set,
        # then persist only if something changed.
        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
327 327
def stripbmrevset(repo, mark):
    """Return the revset of revisions to strip for ``hg strip -B mark``.

    Lives here (rather than in the strip extension) so other extensions
    can use and wrap it even when strip is disabled or absent.
    """
    spec = ("ancestors(bookmark(%s)) - "
            "ancestors(head() and not bookmark(%s)) - "
            "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(spec, mark, mark, mark)
339 339
def deleteobsmarkers(obsstore, indices):
    """Delete the obsmarkers at the given indices; return the delete count.

    ``indices`` is a list of ints identifying the markers to remove.

    Every invocation rewrites the obsstore file in full: surviving markers
    are written to a temporary file which is atomically renamed over the
    obsstore on close, guaranteeing consistency.  When ``indices`` is
    empty, nothing is rewritten and None is returned.
    """
    if not indices:
        # Rewriting the obsstore with identical content would be wasted work.
        return

    kept = []
    ndeleted = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            ndeleted += 1
        else:
            kept.append(marker)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return ndeleted
368 368
def upgraderequiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade is refused unless the source repository carries every
    requirement in the returned set.
    """
    # Both 'revlogv1' and 'store' were introduced in Mercurial 0.9.2;
    # repositories predating them cannot be upgraded.
    return set(['revlogv1', 'store'])
381 381
def upgradeblocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    A source repository carrying any requirement in the returned set
    cannot be upgraded.
    """
    blockers = [
        # The upgrade code does not yet support these experimental features;
        # this is an artificial limitation.
        'manifestv2',
        'treemanifest',
        # A precursor to generaldelta, never enabled by default; it should
        # (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    ]
    return set(blockers)
399 399
def upgradesupportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    # No requirement may currently be dropped by an upgrade.
    return set()
408 408
def upgradesupporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    An upgrade producing any requirement outside this set is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = ['dotencode', 'fncache', 'generaldelta', 'revlogv1', 'store']
    return set(supported)
424 424
def upgradeallowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    Used to reject proposed requirements that were not present before the
    upgrade.  This is a whitelist rather than a blacklist of known-bad
    additions: the whitelist approach is safer and keeps future, unknown
    requirements from being added by accident.
    """
    allowed = ['dotencode', 'fncache', 'generaldelta']
    return set(allowed)
440 440
# Improvement categories assigned to ``upgradeimprovement.type``.  Code in
# this module compares against the identifiers (e.g. ``i.type == deficiency``),
# not the string literals.  NOTE(review): the identifier 'optimisation'
# (British spelling) holds the value 'optimization' (American spelling) —
# confirm before relying on the literal value anywhere.
deficiency = 'deficiency'
optimisation = 'optimization'
443 443
class upgradeimprovement(object):
    """Describes a single improvement an upgrade can perform.

    Instances expose the following attributes:

    name
       Machine-readable string uniquely identifying the improvement; mapped
       to an action later in the upgrade process.

    type
       ``deficiency`` (an outright problem) or ``optimisation`` (an action,
       sometimes optional, that further improves the repository).

    description
       Human-readable explanation of the improvement and its implications.
       Present tense for ``deficiency`` types, future tense for
       ``optimisation`` types.

    upgrademessage
       Human-readable explanation of what an upgrade addressing this issue
       will do; worded in the future tense.

    fromdefault (``deficiency`` types only)
       Whether the deficient state deviates from Mercurial's default
       configuration.

    fromconfig (``deficiency`` types only)
       Whether the deficient state deviates from the current Mercurial
       configuration.
    """
    def __init__(self, name, type, description, upgrademessage, **kwargs):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

        # Any remaining keyword arguments become attributes verbatim
        # (e.g. fromdefault/fromconfig on deficiency entries).
        for key, value in kwargs.items():
            setattr(self, key, value)
484 484
def upgradefindimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``upgradeimprovement`` describing repository deficiencies
    and optimizations.

    Deficiencies are detected by comparing the repository's requirements
    against what a newly-created repository would use; optimizations are
    always appended and filtered later by ``upgradedetermineactions``.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    newreporeqs = localrepo.newreporequirements(repo)

    improvements = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    cl = repo.changelog
    for rev in cl:
        chainbase = cl.chainbase(rev)
        if chainbase != rev:
            # One delta-stored entry is enough to flag the deficiency.
            improvements.append(upgradeimprovement(
                name='removecldeltachain',
                type=deficiency,
                # Fixed typo in user-facing message: "reformated" ->
                # "reformatted".
                description=_('changelog storage is using deltas instead of '
                              'raw entries; changelog reading and any '
                              'operation relying on changelog data are slower '
                              'than they could be'),
                upgrademessage=_('changelog storage will be reformatted to '
                                 'store raw entries; changelog reading will be '
                                 'faster; changelog size may be reduced'),
                fromdefault=True,
                fromconfig=True))
            break

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    improvements.append(upgradeimprovement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    improvements.append(upgradeimprovement(
        name='redeltamultibase',
        type=optimisation,
        # Fixed grammar in user-facing message: "multiple base revision" ->
        # "multiple base revisions".
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revisions and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the amount '
                      'of files in the repository; this slow down should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    improvements.append(upgradeimprovement(
        name='redeltaall',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    return improvements
611 611
def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
                            optimize):
    """Determine upgrade actions that will be performed.

    Given the improvements from ``upgradefindimprovements``, filter out
    those that no longer apply, schedule every remaining deficiency fix,
    and append the requested optimizations.

    Returns a list of action names.
    """
    knownreqs = upgradesupporteddestrequirements(repo)

    actions = []
    for improvement in improvements:
        name = improvement.name

        # A requirement-backed action only makes sense when the requirement
        # actually shows up in the destination; otherwise prune it.
        if name in knownreqs and name not in destreqs:
            continue

        if improvement.type == deficiency:
            actions.append(name)

    # Requested optimizations follow the deficiency fixes, deduplicated.
    for opt in sorted(optimize):
        if opt not in actions:
            actions.append(opt)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return actions
646 646
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    Returns a changelog, manifestrevlog, or plain revlog instance depending
    on what the store path refers to.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith('00manifest.i'):
        mandir = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, dir=mandir)
    # Filelogs don't do anything special with settings, so a vanilla
    # revlog suffices.
    return revlog.revlog(repo.svfs, path)
661 661
def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
    """Copy revlogs between 2 repos.

    Walks the source store twice: a first pass collects revision counts and
    sizes (and validates every revlog opens), a second pass clones each
    revlog into ``dstrepo`` inside transaction ``tr``, reporting progress
    and per-category (changelog/manifest/filelog) size deltas as it goes.
    """
    # Aggregate counters, plus per-category counters prefixed f/m/c for
    # filelogs, manifests and the changelog respectively.
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)
        revcount += len(rl)

        datasize = 0
        rawsize = 0
        idx = rl.index
        for rev in rl:
            # Index entries: e[1] is the stored (compressed) length,
            # e[2] the uncompressed length.
            e = idx[rev]
            datasize += e[1]
            rawsize += e[2]

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, revlog.revlog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize

    if not revcount:
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             (util.bytecount(srcsize), util.bytecount(srcrawsize))))

    # Used to keep track of progress.
    # progress holds [topic, position, total] and is updated in place so the
    # nested callback below always sees the current phase.
    progress = []
    def oncopiedrevision(rl, rev, node):
        progress[1] += 1
        srcrepo.ui.progress(progress[0], progress[1], total=progress[2])

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)
        newrl = _revlogfrompath(dstrepo, unencoded)

        # store.walk() yields filelogs, then manifests, then the changelog;
        # the 'seen' markers fire a summary + progress reset once per phase.
        # NOTE(review): the ordering assumption comes from observing the
        # f/m/c transitions below — confirm against store.walk().
        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress[:] = [_('changelog revisions'), 0, crevcount]
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            progress[:] = [_('manifest revisions'), 0, mrevcount]
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            progress[:] = [_('file revisions'), 0, frevcount]

        ui.progress(progress[0], progress[1], total=progress[2])

        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                    deltareuse=deltareuse,
                    aggressivemergedeltas=aggressivemergedeltas)

        # Measure the size of the freshly written revlog for the summary.
        datasize = 0
        idx = newrl.index
        for rev in newrl:
            datasize += idx[rev][1]

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    ui.progress(progress[0], None)

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
802 802
803 803 def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
804 804 """Determine whether to copy a store file during upgrade.
805 805
806 806 This function is called when migrating store files from ``srcrepo`` to
807 807 ``dstrepo`` as part of upgrading a repository.
808 808
809 809 Args:
810 810 srcrepo: repo we are copying from
811 811 dstrepo: repo we are copying to
812 812 requirements: set of requirements for ``dstrepo``
813 813 path: store file being examined
814 814 mode: the ``ST_MODE`` file type of ``path``
815 815 st: ``stat`` data structure for ``path``
816 816
817 817 Function should return ``True`` if the file is to be copied.
818 818 """
819 819 # Skip revlogs.
820 820 if path.endswith(('.i', '.d')):
821 821 return False
822 822 # Skip transaction related files.
823 823 if path.startswith('undo'):
824 824 return False
825 825 # Only copy regular files.
826 826 if mode != stat.S_IFREG:
827 827 return False
828 828 # Skip other skipped files.
829 829 if path in ('lock', 'fncache'):
830 830 return False
831 831
832 832 return True
833 833
def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.

    The default implementation intentionally does nothing.
    """
840 840
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    # Both repos must already be write-locked by the caller.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    # Choose the delta reuse policy for revlog migration. Later assignments
    # take precedence, so the strongest requested action wins
    # (redeltaall > redeltaparent > redeltamultibase > default).
    reusemode = revlog.revlog.DELTAREUSEALWAYS
    if 'redeltamultibase' in actions:
        reusemode = revlog.revlog.DELTAREUSESAMEREVS
    if 'redeltaparent' in actions:
        reusemode = revlog.revlog.DELTAREUSESAMEREVS
    if 'redeltaall' in actions:
        reusemode = revlog.revlog.DELTAREUSENEVER

    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, reusemode,
                     'redeltamultibase' in actions)

        # Migrate the remaining (non-revlog) files in the store directory.
        for entry, kind, st in srcrepo.store.vfs.readdir('', stat=True):
            if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
                                           entry, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % entry)
            util.copyfile(srcrepo.store.vfs.join(entry),
                          dstrepo.store.vfs.join(entry),
                          copystat=True)

        _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = scmutil.vfs(backuppath)

    # The requires file is the first thing modified during the swap, so it
    # is the first thing backed up.
    util.copyfile(srcrepo.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | set(['upgradeinprogress']))

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') % backuppath)

    # Swap in the new store directory via renames, which should be nearly
    # instantaneous and atomic (at least in well-behaved environments).
    ui.write(_('replacing store...\n'))
    swapstart = time.time()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % (time.time() - swapstart))

    # Writing the (possibly expanded) requirements file makes the repository
    # readable again; any new requirements will lock out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath
922 928
def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place.

    Validates that ``repo`` can be upgraded (all required source
    requirements present; no blocked, removed, added, or unsupported
    requirements outside the allowed sets), determines the improvement
    actions to perform, then either reports what an upgrade would do
    (``run=False``) or performs the upgrade under both repository locks
    (``run=True``).

    ``optimize`` is an optional iterable of optimization action names to
    request in addition to detected deficiencies.

    Returns ``None``. Raises ``error.Abort`` if the repository cannot be
    upgraded or an unknown optimization is requested.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    noremovereqs = (repo.requirements - newreqs -
                    upgradesupportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 upgradeallowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    improvements = upgradefindimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                      newreqs, optimize)

    def printrequirements():
        # Summarize which requirements are kept, dropped, and added.
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        # Print the upgrade message of every improvement that will run.
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n   %s\n\n' %
                             (i.name, i.upgrademessage))

    if not run:
        # Dry-run mode: report deficiencies and what "--run" would change.
        fromdefault = []
        fromconfig = []

        for i in improvements:
            # Improvement types were validated above; this guards internal
            # consistency only.
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)

        if fromdefault or fromconfig:
            fromconfignames = set(x.name for x in fromconfig)
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock():
        with repo.lock():
            ui.write(_('repository locked and read-only\n'))
            # Our strategy for upgrading the repository is to create a new,
            # temporary repository, write data to it, then do a swap of the
            # data. There are less heavyweight ways to do this, but it is easier
            # to create a new repo object than to instantiate all the components
            # (like the store) separately.
            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
            backuppath = None
            try:
                ui.write(_('creating temporary repository to stage migrated '
                           'data: %s\n') % tmppath)
                dstrepo = localrepo.localrepository(repo.baseui,
                                                    path=tmppath,
                                                    create=True)

                with dstrepo.wlock():
                    with dstrepo.lock():
                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                                  actions)

            finally:
                ui.write(_('removing temporary repository %s\n') % tmppath)
                repo.vfs.rmtree(tmppath, forcibly=True)

                if backuppath:
                    ui.warn(_('copy of old repository backed up at %s\n') %
                            backuppath)
                    ui.warn(_('the old repository will not be deleted; remove '
                              'it to free up disk space once the upgraded '
                              'repository is verified\n'))
@@ -1,313 +1,312
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [extensions]
3 3 > share =
4 4 > EOF
5 5
6 6 store and revlogv1 are required in source
7 7
8 8 $ hg --config format.usestore=false init no-store
9 9 $ hg -R no-store debugupgraderepo
10 10 abort: cannot upgrade repository; requirement missing: store
11 11 [255]
12 12
13 13 $ hg init no-revlogv1
14 14 $ cat > no-revlogv1/.hg/requires << EOF
15 15 > dotencode
16 16 > fncache
17 17 > generaldelta
18 18 > store
19 19 > EOF
20 20
21 21 $ hg -R no-revlogv1 debugupgraderepo
22 22 abort: cannot upgrade repository; requirement missing: revlogv1
23 23 [255]
24 24
25 25 Cannot upgrade shared repositories
26 26
27 27 $ hg init share-parent
28 28 $ hg -q share share-parent share-child
29 29
30 30 $ hg -R share-child debugupgraderepo
31 31 abort: cannot upgrade repository; unsupported source requirement: shared
32 32 [255]
33 33
34 34 Do not yet support upgrading manifestv2 and treemanifest repos
35 35
36 36 $ hg --config experimental.manifestv2=true init manifestv2
37 37 $ hg -R manifestv2 debugupgraderepo
38 38 abort: cannot upgrade repository; unsupported source requirement: manifestv2
39 39 [255]
40 40
41 41 $ hg --config experimental.treemanifest=true init treemanifest
42 42 $ hg -R treemanifest debugupgraderepo
43 43 abort: cannot upgrade repository; unsupported source requirement: treemanifest
44 44 [255]
45 45
46 46 Cannot add manifestv2 or treemanifest requirement during upgrade
47 47
48 48 $ hg init disallowaddedreq
49 49 $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
50 50 abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
51 51 [255]
52 52
53 53 An upgrade of a repository created with recommended settings only suggests optimizations
54 54
55 55 $ hg init empty
56 56 $ cd empty
57 57 $ hg debugupgraderepo
58 58 (no feature deficiencies found in existing repository)
59 59 performing an upgrade with "--run" will make the following changes:
60 60
61 61 requirements
62 62 preserved: dotencode, fncache, generaldelta, revlogv1, store
63 63
64 64 additional optimizations are available by specifying "--optimize <name>":
65 65
66 66 redeltaparent
67 67 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
68 68
69 69 redeltamultibase
70 70 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
71 71
72 72 redeltaall
73 73 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
74 74
75 75
76 76 --optimize can be used to add optimizations
77 77
78 78 $ hg debugupgrade --optimize redeltaparent
79 79 (no feature deficiencies found in existing repository)
80 80 performing an upgrade with "--run" will make the following changes:
81 81
82 82 requirements
83 83 preserved: dotencode, fncache, generaldelta, revlogv1, store
84 84
85 85 redeltaparent
86 86 deltas within internal storage will choose a new base revision if needed
87 87
88 88 additional optimizations are available by specifying "--optimize <name>":
89 89
90 90 redeltamultibase
91 91 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
92 92
93 93 redeltaall
94 94 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
95 95
96 96
97 97 Various sub-optimal detections work
98 98
99 99 $ cat > .hg/requires << EOF
100 100 > revlogv1
101 101 > store
102 102 > EOF
103 103
104 104 $ hg debugupgraderepo
105 105 repository lacks features recommended by current config options:
106 106
107 107 fncache
108 108 long and reserved filenames may not work correctly; repository performance is sub-optimal
109 109
110 110 dotencode
111 111 storage of filenames beginning with a period or space may not work correctly
112 112
113 113 generaldelta
114 114 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
115 115
116 116
117 117 performing an upgrade with "--run" will make the following changes:
118 118
119 119 requirements
120 120 preserved: revlogv1, store
121 121 added: dotencode, fncache, generaldelta
122 122
123 123 fncache
124 124 repository will be more resilient to storing certain paths and performance of certain operations should be improved
125 125
126 126 dotencode
127 127 repository will be better able to store files beginning with a space or period
128 128
129 129 generaldelta
130 130 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
131 131
132 132 additional optimizations are available by specifying "--optimize <name>":
133 133
134 134 redeltaparent
135 135 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
136 136
137 137 redeltamultibase
138 138 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
139 139
140 140 redeltaall
141 141 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
142 142
143 143
144 144 $ hg --config format.dotencode=false debugupgraderepo
145 145 repository lacks features recommended by current config options:
146 146
147 147 fncache
148 148 long and reserved filenames may not work correctly; repository performance is sub-optimal
149 149
150 150 generaldelta
151 151 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
152 152
153 153 repository lacks features used by the default config options:
154 154
155 155 dotencode
156 156 storage of filenames beginning with a period or space may not work correctly
157 157
158 158
159 159 performing an upgrade with "--run" will make the following changes:
160 160
161 161 requirements
162 162 preserved: revlogv1, store
163 163 added: fncache, generaldelta
164 164
165 165 fncache
166 166 repository will be more resilient to storing certain paths and performance of certain operations should be improved
167 167
168 168 generaldelta
169 169 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
170 170
171 171 additional optimizations are available by specifying "--optimize <name>":
172 172
173 173 redeltaparent
174 174 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
175 175
176 176 redeltamultibase
177 177 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
178 178
179 179 redeltaall
180 180 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
181 181
182 182
183 183 $ cd ..
184 184
185 185 Upgrading a repository that is already modern essentially no-ops
186 186
187 187 $ hg init modern
188 188 $ hg -R modern debugupgraderepo --run
189 189 upgrade will perform the following actions:
190 190
191 191 requirements
192 192 preserved: dotencode, fncache, generaldelta, revlogv1, store
193 193
194 194 beginning upgrade...
195 195 repository locked and read-only
196 196 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
197 197 (it is safe to interrupt this process any time before data migration completes)
198 198 data fully migrated to temporary repository
199 199 marking source repository as being upgraded; clients will be unable to read from repository
200 200 starting in-place swap of repository data
201 201 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
202 202 replacing store...
203 203 store replacement complete; repository was inconsistent for *s (glob)
204 204 finalizing requirements file and making repository readable again
205 205 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
206 206 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
207 207 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
208 208
209 209 Upgrading a repository to generaldelta works
210 210
211 211 $ hg --config format.usegeneraldelta=false init upgradegd
212 212 $ cd upgradegd
213 213 $ touch f0
214 214 $ hg -q commit -A -m initial
215 215 $ touch f1
216 216 $ hg -q commit -A -m 'add f1'
217 217 $ hg -q up -r 0
218 218 $ touch f2
219 219 $ hg -q commit -A -m 'add f2'
220 220
221 221 $ hg debugupgraderepo --run
222 222 upgrade will perform the following actions:
223 223
224 224 requirements
225 225 preserved: dotencode, fncache, revlogv1, store
226 226 added: generaldelta
227 227
228 228 generaldelta
229 229 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
230 230
231 231 beginning upgrade...
232 232 repository locked and read-only
233 233 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
234 234 (it is safe to interrupt this process any time before data migration completes)
235 235 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
236 236 migrating 341 bytes in store; 401 bytes tracked data
237 237 migrating 3 filelogs containing 3 revisions (0 bytes in store; 0 bytes tracked data)
238 238 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
239 239 migrating 1 manifests containing 3 revisions (157 bytes in store; 220 bytes tracked data)
240 240 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
241 241 migrating changelog containing 3 revisions (184 bytes in store; 181 bytes tracked data)
242 242 finished migrating 3 changelog revisions; change in size: 0 bytes
243 243 finished migrating 9 total revisions; total change in store size: 0 bytes
244 244 copying phaseroots
245 245 data fully migrated to temporary repository
246 246 marking source repository as being upgraded; clients will be unable to read from repository
247 247 starting in-place swap of repository data
248 248 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
249 249 replacing store...
250 250 store replacement complete; repository was inconsistent for *s (glob)
251 251 finalizing requirements file and making repository readable again
252 252 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
253 253 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
254 254 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
255 255
256 256 Original requirements backed up
257 257
258 258 $ cat .hg/upgradebackup.*/requires
259 259 dotencode
260 260 fncache
261 261 revlogv1
262 262 store
263 263
264 264 generaldelta added to original requirements files
265 265
266 266 $ cat .hg/requires
267 267 dotencode
268 268 fncache
269 269 generaldelta
270 270 revlogv1
271 271 store
272 272
273 273 store directory has files we expect
274 274
275 275 $ ls .hg/store
276 276 00changelog.i
277 277 00manifest.i
278 278 data
279 279 fncache
280 280 phaseroots
281 281 undo
282 282 undo.backupfiles
283 283 undo.phaseroots
284 284
285 285 manifest should be generaldelta
286 286
287 287 $ hg debugrevlog -m | grep flags
288 288 flags : inline, generaldelta
289 289
290 290 verify should be happy
291 291
292 292 $ hg verify
293 293 checking changesets
294 294 checking manifests
295 295 crosschecking files in changesets and manifests
296 296 checking files
297 297 3 files, 3 changesets, 3 total revisions
298 298
299 299 old store should be backed up
300 300
301 301 $ ls .hg/upgradebackup.*/store
302 302 00changelog.i
303 303 00manifest.i
304 304 data
305 305 fncache
306 lock
307 306 phaseroots
308 307 undo
309 308 undo.backup.fncache
310 309 undo.backupfiles
311 310 undo.phaseroots
312 311
313 312 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now