##// END OF EJS Templates
repair: determine what upgrade will do...
Gregory Szorc -
r30776:3997edc4 default
parent child Browse files
Show More
@@ -1,495 +1,773 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import short
16 16 from . import (
17 17 bundle2,
18 18 changegroup,
19 19 error,
20 20 exchange,
21 21 obsolete,
22 22 util,
23 23 )
24 24
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cgversion = changegroup.safeversion(repo)
    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)

    # Backups live under .hg/strip-backup; create it on first use.
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    # Modern changegroups go into an HG20 bundle (optionally BZ-compressed);
    # version-01 changegroups use the legacy HG10 bundle types.
    if cgversion != '01':
        bundletype = "HG20"
        compmode = 'BZ' if compress else None
    else:
        bundletype = "HG10BZ" if compress else "HG10UN"
        compmode = None
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=compmode)
53 53
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    touched = set()
    # Every file mentioned by any changeset at or above the strip point
    # has a filelog that will be truncated.
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
62 62
63 63 def _collectbrokencsets(repo, files, striprev):
64 64 """return the changesets which will be broken by the truncation"""
65 65 s = set()
66 66 def collectone(revlog):
67 67 _, brokenset = revlog.getstrippoint(striprev)
68 68 s.update([revlog.linkrev(r) for r in brokenset])
69 69
70 70 collectone(repo.manifestlog._revlog)
71 71 for fname in files:
72 72 collectone(repo.file(fname))
73 73
74 74 return s
75 75
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip changesets (and everything depending on them) from the repo.

    ``nodelist`` is a single node or a list of nodes whose revisions (and
    descendants) get removed. When ``backup`` is true, the stripped
    revisions are first written to a bundle under ``strip-backup/``
    (named after ``topic``). Revisions above the strip point that are
    *not* descendants of it are saved to a temporary bundle and
    re-applied after truncation.

    Returns the backup bundle path, or None when no backup was written,
    so extensions can use it.
    """
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Filelogs touched by the strip and the changesets whose filelog/manifest
    # links will be severed by the truncation.
    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        # A revision with a to-be-stripped parent must be stripped too.
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # Bookmarks pointing into the stripped set get moved to newbmtarget.
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    # Stripping inside an open transaction would corrupt its journal.
    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr  # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # Truncate the revlog files back to the offsets recorded in the
            # transaction journal; fully-emptied files are marked removed.
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        # Re-apply the revisions that were saved because they are not
        # descendants of the strip point.
        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        # Move displaced bookmarks onto the surviving target revision.
        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        # Tell the user where the safety copies are before propagating.
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
248 248
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache. The membership test forces the lazy
        # structure to read its on-disk contents; its result is ignored.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        # Walk every changeset and record the store paths (.i/.d) of each
        # file it touches; that union is the authoritative fncache content.
        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # Tree manifests store per-directory manifest revlogs under
            # meta/; include those for every directory of every seen file.
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            # Persist the rebuilt cache within a transaction.
            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
320 320
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    # Everything reachable from the bookmark, minus anything reachable from
    # other heads or other bookmarks, i.e. the revisions only this bookmark
    # keeps alive.
    revset = ("ancestors(bookmark(%s)) - "
              "ancestors(head() and not bookmark(%s)) - "
              "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(revset, mark, mark, mark)
332 332
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # Partition the existing markers into kept vs deleted.
    kept = []
    deleted = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            deleted += 1
        else:
            kept.append(marker)

    # atomictemp gives us an all-or-nothing replacement of the obsstore.
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return deleted
361 361
def upgraderequiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements were introduced in Mercurial 0.9.2; repositories
    # lacking them predate that release and are not supported here.
    return set(['revlogv1', 'store'])
374 374
def upgradeblocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains any
    requirement in the returned set.
    """
    blockers = set()
    # The upgrade code does not yet support these experimental features.
    # This is an artificial limitation.
    blockers.update(['manifestv2', 'treemanifest'])
    # This was a precursor to generaldelta and was never enabled by default.
    # It should (hopefully) not exist in the wild.
    blockers.add('parentdelta')
    # Upgrade should operate on the actual store, not the shared link.
    blockers.add('shared')
    return blockers
392 392
def upgradesupportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    # Dropping requirements during an upgrade is currently never supported.
    return set()
401 401
def upgradesupporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = [
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    ]
    return set(supported)
417 417
def upgradeallowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    allowed = ['dotencode', 'fncache', 'generaldelta']
    return set(allowed)
433 433
# Improvement classifications used for ``upgradeimprovement.type``. Note the
# deliberate spelling mismatch: the variable is named ``optimisation`` but its
# value is the American-spelled 'optimization' (user-visible comparisons use
# the value, extensions use the name).
deficiency = 'deficiency'
optimisation = 'optimization'
436
class upgradeimprovement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.

    fromdefault (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from Mercurial's default configuration.

    fromconfig (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from the current Mercurial configuration.
    """
    def __init__(self, name, type, description, upgrademessage, **kwargs):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

        # Type-specific fields (e.g. ``fromdefault``/``fromconfig``) arrive
        # as extra keyword arguments and become plain attributes verbatim.
        self.__dict__.update(kwargs)
477
def upgradefindimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``upgradeimprovement`` describing repository deficiencies
    and optimizations: one deficiency per missing recommended requirement
    (plus one for a delta-chained changelog), followed by the three redelta
    optimizations, which are always offered.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    # Requirements a brand-new repo would get under the current config;
    # used to tell "deviates from config" apart from "deviates from default".
    newreporeqs = localrepo.newreporequirements(repo)

    improvements = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    cl = repo.changelog
    for rev in cl:
        chainbase = cl.chainbase(rev)
        if chainbase != rev:
            improvements.append(upgradeimprovement(
                name='removecldeltachain',
                type=deficiency,
                description=_('changelog storage is using deltas instead of '
                              'raw entries; changelog reading and any '
                              'operation relying on changelog data are slower '
                              'than they could be'),
                upgrademessage=_('changelog storage will be reformatted to '
                                 'store raw entries; changelog reading will be '
                                 'faster; changelog size may be reduced'),
                fromdefault=True,
                fromconfig=True))
            # One delta'd entry is enough to schedule the action.
            break

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    improvements.append(upgradeimprovement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    improvements.append(upgradeimprovement(
        name='redeltamultibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revisions and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the amount '
                      'of files in the repository; this slow down should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    improvements.append(upgradeimprovement(
        name='redeltaall',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    return improvements
604
def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
                            optimize):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``upgradefindimprovements``,
    determine the list of upgrade actions that will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    knownreqs = upgradesupporteddestrequirements(repo)

    newactions = []
    for improvement in improvements:
        # Prune requirement-backed actions that the destination will not
        # actually carry.
        if improvement.name in knownreqs and improvement.name not in destreqs:
            continue

        # Deficiencies are always acted upon; optimizations only when
        # explicitly requested below.
        if improvement.type == deficiency:
            newactions.append(improvement.name)

    # Append the requested optimizations, sorted and de-duplicated against
    # what is already scheduled.
    for wanted in sorted(optimize):
        if wanted not in newactions:
            newactions.append(wanted)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
639
def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place.

    Validates that the repository can be upgraded, computes the actions to
    perform, and (when ``run`` is false) prints a report of what an upgrade
    would change. ``optimize`` is an optional iterable of optimization
    action names to schedule in addition to detected deficiencies.

    NOTE(review): no handling for ``run=True`` is visible in this block —
    with ``run`` set, the function currently returns after the validation
    and action-determination steps without printing or changing anything.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    # Requirements that would be dropped must be explicitly allowed.
    noremovereqs = (repo.requirements - newreqs -
                   upgradesupportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    # Requirements that would be added must be on the whitelist.
    noaddreqs = (newreqs - repo.requirements -
                 upgradeallowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    improvements = upgradefindimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                      newreqs, optimize)

    def printrequirements():
        # Report how the requirements file would change.
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        # Report each scheduled action using its upgrade message.
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n   %s\n\n' %
                             (i.name, i.upgrademessage))

    if not run:
        # Dry-run report: group deficiencies by where they deviate from
        # (default vs current config) and list available optimizations.
        fromdefault = []
        fromconfig = []
        optimizations = []

        for i in improvements:
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)
            else:
                optimizations.append(i)

        if fromdefault or fromconfig:
            fromconfignames = set(x.name for x in fromconfig)
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        # Optimizations not scheduled are advertised so the user can opt in.
        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
@@ -1,51 +1,182 b''
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [extensions]
3 3 > share =
4 4 > EOF
5 5
6 6 store and revlogv1 are required in source
7 7
8 8 $ hg --config format.usestore=false init no-store
9 9 $ hg -R no-store debugupgraderepo
10 10 abort: cannot upgrade repository; requirement missing: store
11 11 [255]
12 12
13 13 $ hg init no-revlogv1
14 14 $ cat > no-revlogv1/.hg/requires << EOF
15 15 > dotencode
16 16 > fncache
17 17 > generaldelta
18 18 > store
19 19 > EOF
20 20
21 21 $ hg -R no-revlogv1 debugupgraderepo
22 22 abort: cannot upgrade repository; requirement missing: revlogv1
23 23 [255]
24 24
25 25 Cannot upgrade shared repositories
26 26
27 27 $ hg init share-parent
28 28 $ hg -q share share-parent share-child
29 29
30 30 $ hg -R share-child debugupgraderepo
31 31 abort: cannot upgrade repository; unsupported source requirement: shared
32 32 [255]
33 33
34 34 Do not yet support upgrading manifestv2 and treemanifest repos
35 35
36 36 $ hg --config experimental.manifestv2=true init manifestv2
37 37 $ hg -R manifestv2 debugupgraderepo
38 38 abort: cannot upgrade repository; unsupported source requirement: manifestv2
39 39 [255]
40 40
41 41 $ hg --config experimental.treemanifest=true init treemanifest
42 42 $ hg -R treemanifest debugupgraderepo
43 43 abort: cannot upgrade repository; unsupported source requirement: treemanifest
44 44 [255]
45 45
46 46 Cannot add manifestv2 or treemanifest requirement during upgrade
47 47
48 48 $ hg init disallowaddedreq
49 49 $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
50 50 abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
51 51 [255]
52
53 An upgrade of a repository created with recommended settings only suggests optimizations
54
55 $ hg init empty
56 $ cd empty
57 $ hg debugupgraderepo
58 (no feature deficiencies found in existing repository)
59 performing an upgrade with "--run" will make the following changes:
60
61 requirements
62 preserved: dotencode, fncache, generaldelta, revlogv1, store
63
64 additional optimizations are available by specifying "--optimize <name>":
65
66 redeltaparent
67 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
68
69 redeltamultibase
70 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
71
72 redeltaall
73 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
74
75
76 --optimize can be used to add optimizations
77
78 $ hg debugupgraderepo --optimize redeltaparent
79 (no feature deficiencies found in existing repository)
80 performing an upgrade with "--run" will make the following changes:
81
82 requirements
83 preserved: dotencode, fncache, generaldelta, revlogv1, store
84
85 redeltaparent
86 deltas within internal storage will choose a new base revision if needed
87
88 additional optimizations are available by specifying "--optimize <name>":
89
90 redeltamultibase
91 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
92
93 redeltaall
94 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
95
96
97 Various sub-optimal detections work
98
99 $ cat > .hg/requires << EOF
100 > revlogv1
101 > store
102 > EOF
103
104 $ hg debugupgraderepo
105 repository lacks features recommended by current config options:
106
107 fncache
108 long and reserved filenames may not work correctly; repository performance is sub-optimal
109
110 dotencode
111 storage of filenames beginning with a period or space may not work correctly
112
113 generaldelta
114 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
115
116
117 performing an upgrade with "--run" will make the following changes:
118
119 requirements
120 preserved: revlogv1, store
121 added: dotencode, fncache, generaldelta
122
123 fncache
124 repository will be more resilient to storing certain paths and performance of certain operations should be improved
125
126 dotencode
127 repository will be better able to store files beginning with a space or period
128
129 generaldelta
130 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
131
132 additional optimizations are available by specifying "--optimize <name>":
133
134 redeltaparent
135 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
136
137 redeltamultibase
138 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
139
140 redeltaall
141 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
142
143
144 $ hg --config format.dotencode=false debugupgraderepo
145 repository lacks features recommended by current config options:
146
147 fncache
148 long and reserved filenames may not work correctly; repository performance is sub-optimal
149
150 generaldelta
151 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
152
153 repository lacks features used by the default config options:
154
155 dotencode
156 storage of filenames beginning with a period or space may not work correctly
157
158
159 performing an upgrade with "--run" will make the following changes:
160
161 requirements
162 preserved: revlogv1, store
163 added: fncache, generaldelta
164
165 fncache
166 repository will be more resilient to storing certain paths and performance of certain operations should be improved
167
168 generaldelta
169 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
170
171 additional optimizations are available by specifying "--optimize <name>":
172
173 redeltaparent
174 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
175
176 redeltamultibase
177 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
178
179 redeltaall
180 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
181
182
General Comments 0
You need to be logged in to leave comments. Login now