upgrade: properly filter action depending on planned work...
marmoute
r45248:c36a3fcf stable
@@ -1,1400 +1,1398
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from .i18n import _
13 13 from .pycompat import getattr
14 14 from . import (
15 15 changelog,
16 16 copies,
17 17 error,
18 18 filelog,
19 19 hg,
20 20 localrepo,
21 21 manifest,
22 22 pycompat,
23 23 revlog,
24 24 scmutil,
25 25 util,
26 26 vfs as vfsmod,
27 27 )
28 28
29 29 from .utils import compression
30 30
31 31 # list of requirements that request a clone of all revlog if added/removed
32 32 RECLONES_REQUIREMENTS = {
33 33 b'generaldelta',
34 34 localrepo.SPARSEREVLOG_REQUIREMENT,
35 35 }
36 36
37 37
38 38 def requiredsourcerequirements(repo):
39 39 """Obtain requirements required to be present to upgrade a repo.
40 40
41 41 An upgrade will not be allowed if the repository doesn't have the
42 42 requirements returned by this function.
43 43 """
44 44 return {
45 45 # Introduced in Mercurial 0.9.2.
46 46 b'revlogv1',
47 47 # Introduced in Mercurial 0.9.2.
48 48 b'store',
49 49 }
50 50
51 51
52 52 def blocksourcerequirements(repo):
53 53 """Obtain requirements that will prevent an upgrade from occurring.
54 54
55 55 An upgrade cannot be performed if the source repository contains a
56 56 requirement in the returned set.
57 57 """
58 58 return {
59 59 # The upgrade code does not yet support these experimental features.
60 60 # This is an artificial limitation.
61 61 b'treemanifest',
62 62 # This was a precursor to generaldelta and was never enabled by default.
63 63 # It should (hopefully) not exist in the wild.
64 64 b'parentdelta',
65 65 # Upgrade should operate on the actual store, not the shared link.
66 66 b'shared',
67 67 }
68 68
69 69
70 70 def supportremovedrequirements(repo):
71 71 """Obtain requirements that can be removed during an upgrade.
72 72
73 73 If an upgrade were to create a repository that dropped a requirement,
74 74 the dropped requirement must appear in the returned set for the upgrade
75 75 to be allowed.
76 76 """
77 77 supported = {
78 78 localrepo.SPARSEREVLOG_REQUIREMENT,
79 79 localrepo.SIDEDATA_REQUIREMENT,
80 80 localrepo.COPIESSDC_REQUIREMENT,
81 81 }
82 82 for name in compression.compengines:
83 83 engine = compression.compengines[name]
84 84 if engine.available() and engine.revlogheader():
85 85 supported.add(b'exp-compression-%s' % name)
86 86 if engine.name() == b'zstd':
87 87 supported.add(b'revlog-compression-zstd')
88 88 return supported
89 89
90 90
91 91 def supporteddestrequirements(repo):
92 92 """Obtain requirements that upgrade supports in the destination.
93 93
94 94 If the result of the upgrade would create requirements not in this set,
95 95 the upgrade is disallowed.
96 96
97 97 Extensions should monkeypatch this to add their custom requirements.
98 98 """
99 99 supported = {
100 100 b'dotencode',
101 101 b'fncache',
102 102 b'generaldelta',
103 103 b'revlogv1',
104 104 b'store',
105 105 localrepo.SPARSEREVLOG_REQUIREMENT,
106 106 localrepo.SIDEDATA_REQUIREMENT,
107 107 localrepo.COPIESSDC_REQUIREMENT,
108 108 }
109 109 for name in compression.compengines:
110 110 engine = compression.compengines[name]
111 111 if engine.available() and engine.revlogheader():
112 112 supported.add(b'exp-compression-%s' % name)
113 113 if engine.name() == b'zstd':
114 114 supported.add(b'revlog-compression-zstd')
115 115 return supported
116 116
117 117
118 118 def allowednewrequirements(repo):
119 119 """Obtain requirements that can be added to a repository during upgrade.
120 120
121 121 This is used to disallow proposed requirements from being added when
122 122 they weren't present before.
123 123
124 124 We use a list of allowed requirement additions instead of a list of known
125 125 bad additions because the whitelist approach is safer and will prevent
126 126 future, unknown requirements from accidentally being added.
127 127 """
128 128 supported = {
129 129 b'dotencode',
130 130 b'fncache',
131 131 b'generaldelta',
132 132 localrepo.SPARSEREVLOG_REQUIREMENT,
133 133 localrepo.SIDEDATA_REQUIREMENT,
134 134 localrepo.COPIESSDC_REQUIREMENT,
135 135 }
136 136 for name in compression.compengines:
137 137 engine = compression.compengines[name]
138 138 if engine.available() and engine.revlogheader():
139 139 supported.add(b'exp-compression-%s' % name)
140 140 if engine.name() == b'zstd':
141 141 supported.add(b'revlog-compression-zstd')
142 142 return supported
143 143
144 144
145 145 def preservedrequirements(repo):
146 146 return set()
147 147
148 148
149 149 deficiency = b'deficiency'
150 150 optimisation = b'optimization'
151 151
152 152
153 153 class improvement(object):
154 154 """Represents an improvement that can be made as part of an upgrade.
155 155
156 156 The following attributes are defined on each instance:
157 157
158 158 name
159 159 Machine-readable string uniquely identifying this improvement. It
160 160 will be mapped to an action later in the upgrade process.
161 161
162 162 type
163 163 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
164 164 problem. An optimization is an action (sometimes optional) that
165 165 can be taken to further improve the state of the repository.
166 166
167 167 description
168 168 Message intended for humans explaining the improvement in more detail,
169 169 including the implications of it. For ``deficiency`` types, should be
170 170 worded in the present tense. For ``optimisation`` types, should be
171 171 worded in the future tense.
172 172
173 173 upgrademessage
174 174 Message intended for humans explaining what an upgrade addressing this
175 175 issue will do. Should be worded in the future tense.
176 176 """
177 177
178 178 def __init__(self, name, type, description, upgrademessage):
179 179 self.name = name
180 180 self.type = type
181 181 self.description = description
182 182 self.upgrademessage = upgrademessage
183 183
184 184 def __eq__(self, other):
185 185 if not isinstance(other, improvement):
186 186 # This is what Python tells us to do
187 187 return NotImplemented
188 188 return self.name == other.name
189 189
190 190 def __ne__(self, other):
191 191 return not (self == other)
192 192
193 193 def __hash__(self):
194 194 return hash(self.name)
195 195
196 196
197 197 allformatvariant = []
198 198
199 199
200 200 def registerformatvariant(cls):
201 201 allformatvariant.append(cls)
202 202 return cls
203 203
204 204
205 205 class formatvariant(improvement):
206 206 """an improvement subclass dedicated to repository format"""
207 207
208 208 type = deficiency
209 209 ### The following attributes should be defined for each class:
210 210
211 211 # machine-readable string uniquely identifying this improvement. it will be
212 212 # mapped to an action later in the upgrade process.
213 213 name = None
214 214
215 215 # message intended for humans explaining the improvement in more detail,
216 216 # including the implications of it. For ``deficiency`` types, it should be
217 217 # worded in the present tense.
218 218 description = None
219 219
220 220 # message intended for humans explaining what an upgrade addressing this
221 221 # issue will do. should be worded in the future tense.
222 222 upgrademessage = None
223 223
224 224 # value of current Mercurial default for new repository
225 225 default = None
226 226
227 227 def __init__(self):
228 228 raise NotImplementedError()
229 229
230 230 @staticmethod
231 231 def fromrepo(repo):
232 232 """current value of the variant in the repository"""
233 233 raise NotImplementedError()
234 234
235 235 @staticmethod
236 236 def fromconfig(repo):
237 237 """current value of the variant in the configuration"""
238 238 raise NotImplementedError()
239 239
240 240
241 241 class requirementformatvariant(formatvariant):
242 242 """formatvariant based on a 'requirement' name.
243 243
244 244 Many format variants are controlled by a 'requirement'. We define a small
245 245 subclass to factor out the common code.
246 246 """
247 247
248 248 # the requirement that controls this format variant
249 249 _requirement = None
250 250
251 251 @staticmethod
252 252 def _newreporequirements(ui):
253 253 return localrepo.newreporequirements(
254 254 ui, localrepo.defaultcreateopts(ui)
255 255 )
256 256
257 257 @classmethod
258 258 def fromrepo(cls, repo):
259 259 assert cls._requirement is not None
260 260 return cls._requirement in repo.requirements
261 261
262 262 @classmethod
263 263 def fromconfig(cls, repo):
264 264 assert cls._requirement is not None
265 265 return cls._requirement in cls._newreporequirements(repo.ui)
266 266
267 267
268 268 @registerformatvariant
269 269 class fncache(requirementformatvariant):
270 270 name = b'fncache'
271 271
272 272 _requirement = b'fncache'
273 273
274 274 default = True
275 275
276 276 description = _(
277 277 b'long and reserved filenames may not work correctly; '
278 278 b'repository performance is sub-optimal'
279 279 )
280 280
281 281 upgrademessage = _(
282 282 b'repository will be more resilient to storing '
283 283 b'certain paths and performance of certain '
284 284 b'operations should be improved'
285 285 )
286 286
287 287
288 288 @registerformatvariant
289 289 class dotencode(requirementformatvariant):
290 290 name = b'dotencode'
291 291
292 292 _requirement = b'dotencode'
293 293
294 294 default = True
295 295
296 296 description = _(
297 297 b'storage of filenames beginning with a period or '
298 298 b'space may not work correctly'
299 299 )
300 300
301 301 upgrademessage = _(
302 302 b'repository will be better able to store files '
303 303 b'beginning with a space or period'
304 304 )
305 305
306 306
307 307 @registerformatvariant
308 308 class generaldelta(requirementformatvariant):
309 309 name = b'generaldelta'
310 310
311 311 _requirement = b'generaldelta'
312 312
313 313 default = True
314 314
315 315 description = _(
316 316 b'deltas within internal storage are unable to '
317 317 b'choose optimal revisions; repository is larger and '
318 318 b'slower than it could be; interaction with other '
319 319 b'repositories may require extra network and CPU '
320 320 b'resources, making "hg push" and "hg pull" slower'
321 321 )
322 322
323 323 upgrademessage = _(
324 324 b'repository storage will be able to create '
325 325 b'optimal deltas; new repository data will be '
326 326 b'smaller and read times should decrease; '
327 327 b'interacting with other repositories using this '
328 328 b'storage model should require less network and '
329 329 b'CPU resources, making "hg push" and "hg pull" '
330 330 b'faster'
331 331 )
332 332
333 333
334 334 @registerformatvariant
335 335 class sparserevlog(requirementformatvariant):
336 336 name = b'sparserevlog'
337 337
338 338 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
339 339
340 340 default = True
341 341
342 342 description = _(
343 343 b'in order to limit disk reading and memory usage on older '
344 344 b'versions, the span of a delta chain from its root to its '
345 345 b'end is limited regardless of the relevant data in that span. '
346 346 b'This can severely limit the ability of Mercurial to build '
347 347 b'good delta chains, resulting in much more storage space '
348 348 b'being used and limiting the reusability of on-disk deltas '
349 349 b'during exchange.'
350 350 )
351 351
352 352 upgrademessage = _(
353 353 b'Revlog supports delta chains with more unused data '
354 354 b'between payloads. These gaps will be skipped at read '
355 355 b'time. This allows for better delta chains, making '
356 356 b'better compression and faster exchange with the server.'
357 357 )
358 358
359 359
360 360 @registerformatvariant
361 361 class sidedata(requirementformatvariant):
362 362 name = b'sidedata'
363 363
364 364 _requirement = localrepo.SIDEDATA_REQUIREMENT
365 365
366 366 default = False
367 367
368 368 description = _(
369 369 b'Allows storage of extra data alongside a revision, '
370 370 b'unlocking various caching options.'
371 371 )
372 372
373 373 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
374 374
375 375
376 376 @registerformatvariant
377 377 class copiessdc(requirementformatvariant):
378 378 name = b'copies-sdc'
379 379
380 380 _requirement = localrepo.COPIESSDC_REQUIREMENT
381 381
382 382 default = False
383 383
384 384 description = _(b'Stores copies information alongside changesets.')
385 385
386 386 upgrademessage = _(
387 387 b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
388 388 )
389 389
390 390
391 391 @registerformatvariant
392 392 class removecldeltachain(formatvariant):
393 393 name = b'plain-cl-delta'
394 394
395 395 default = True
396 396
397 397 description = _(
398 398 b'changelog storage is using deltas instead of '
399 399 b'raw entries; changelog reading and any '
400 400 b'operation relying on changelog data are slower '
401 401 b'than they could be'
402 402 )
403 403
404 404 upgrademessage = _(
405 405 b'changelog storage will be reformatted to '
406 406 b'store raw entries; changelog reading will be '
407 407 b'faster; changelog size may be reduced'
408 408 )
409 409
410 410 @staticmethod
411 411 def fromrepo(repo):
412 412 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
413 413 # changelogs with deltas.
414 414 cl = repo.changelog
415 415 chainbase = cl.chainbase
416 416 return all(rev == chainbase(rev) for rev in cl)
417 417
418 418 @staticmethod
419 419 def fromconfig(repo):
420 420 return True
421 421
422 422
423 423 @registerformatvariant
424 424 class compressionengine(formatvariant):
425 425 name = b'compression'
426 426 default = b'zlib'
427 427
428 428 description = _(
429 429 b'Compression algorithm used to compress data. '
430 430 b'Some engines are faster than others.'
431 431 )
432 432
433 433 upgrademessage = _(
434 434 b'revlog content will be recompressed with the new algorithm.'
435 435 )
436 436
437 437 @classmethod
438 438 def fromrepo(cls, repo):
439 439 # we allow multiple compression engine requirements to co-exist because,
440 440 # strictly speaking, revlogs seem to support mixed compression styles.
441 441 #
442 442 # The compression used for new entries will be "the last one"
443 443 compression = b'zlib'
444 444 for req in repo.requirements:
445 445 prefix = req.startswith
446 446 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
447 447 compression = req.split(b'-', 2)[2]
448 448 return compression
449 449
450 450 @classmethod
451 451 def fromconfig(cls, repo):
452 452 compengines = repo.ui.configlist(b'format', b'revlog-compression')
453 453 # return the first valid value as the selection code would do
454 454 for comp in compengines:
455 455 if comp in util.compengines:
456 456 return comp
457 457
458 458 # no valid compression found; let's display them all for clarity
459 459 return b','.join(compengines)
460 460
461 461
462 462 @registerformatvariant
463 463 class compressionlevel(formatvariant):
464 464 name = b'compression-level'
465 465 default = b'default'
466 466
467 467 description = _(b'compression level')
468 468
469 469 upgrademessage = _(b'revlog content will be recompressed')
470 470
471 471 @classmethod
472 472 def fromrepo(cls, repo):
473 473 comp = compressionengine.fromrepo(repo)
474 474 level = None
475 475 if comp == b'zlib':
476 476 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
477 477 elif comp == b'zstd':
478 478 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
479 479 if level is None:
480 480 return b'default'
481 481 return bytes(level)
482 482
483 483 @classmethod
484 484 def fromconfig(cls, repo):
485 485 comp = compressionengine.fromconfig(repo)
486 486 level = None
487 487 if comp == b'zlib':
488 488 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
489 489 elif comp == b'zstd':
490 490 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
491 491 if level is None:
492 492 return b'default'
493 493 return bytes(level)
494 494
495 495
496 496 def finddeficiencies(repo):
497 497 """returns a list of deficiencies that the repo suffer from"""
498 498 deficiencies = []
499 499
500 500 # We could detect lack of revlogv1 and store here, but they were added
501 501 # in 0.9.2 and we don't support upgrading repos without these
502 502 # requirements, so let's not bother.
503 503
504 504 for fv in allformatvariant:
505 505 if not fv.fromrepo(repo):
506 506 deficiencies.append(fv)
507 507
508 508 return deficiencies
509 509
510 510
511 511 # search without '-' to support the older form on newer clients.
512 512 #
513 513 # We don't enforce backward compatibility for debug commands, so this
514 514 # might eventually be dropped. However, having to use two different
515 515 # forms in scripts when comparing results is annoying enough to add
516 516 # backward compatibility for a while.
517 517 legacy_opts_map = {
518 518 b'redeltaparent': b're-delta-parent',
519 519 b'redeltamultibase': b're-delta-multibase',
520 520 b'redeltaall': b're-delta-all',
521 521 b'redeltafulladd': b're-delta-fulladd',
522 522 }
523 523
524 524
525 525 def findoptimizations(repo):
526 526 """Determine optimisation that could be used during upgrade"""
527 527 # These are unconditionally added. There is logic later that figures out
528 528 # which ones to apply.
529 529 optimizations = []
530 530
531 531 optimizations.append(
532 532 improvement(
533 533 name=b're-delta-parent',
534 534 type=optimisation,
535 535 description=_(
536 536 b'deltas within internal storage will be recalculated to '
537 537 b'choose an optimal base revision where this was not '
538 538 b'already done; the size of the repository may shrink and '
539 539 b'various operations may become faster; the first time '
540 540 b'this optimization is performed could slow down upgrade '
541 541 b'execution considerably; subsequent invocations should '
542 542 b'not run noticeably slower'
543 543 ),
544 544 upgrademessage=_(
545 545 b'deltas within internal storage will choose a new '
546 546 b'base revision if needed'
547 547 ),
548 548 )
549 549 )
550 550
551 551 optimizations.append(
552 552 improvement(
553 553 name=b're-delta-multibase',
554 554 type=optimisation,
555 555 description=_(
556 556 b'deltas within internal storage will be recalculated '
557 557 b'against multiple base revision and the smallest '
558 558 b'difference will be used; the size of the repository may '
559 559 b'shrink significantly when there are many merges; this '
560 560 b'optimization will slow down execution in proportion to '
561 561 b'the number of merges in the repository and the amount '
562 562 b'of files in the repository; this slow down should not '
563 563 b'be significant unless there are tens of thousands of '
564 564 b'files and thousands of merges'
565 565 ),
566 566 upgrademessage=_(
567 567 b'deltas within internal storage will choose an '
568 568 b'optimal delta by computing deltas against multiple '
569 569 b'parents; may slow down execution time '
570 570 b'significantly'
571 571 ),
572 572 )
573 573 )
574 574
575 575 optimizations.append(
576 576 improvement(
577 577 name=b're-delta-all',
578 578 type=optimisation,
579 579 description=_(
580 580 b'deltas within internal storage will always be '
581 581 b'recalculated without reusing prior deltas; this will '
582 582 b'likely make execution run several times slower; this '
583 583 b'optimization is typically not needed'
584 584 ),
585 585 upgrademessage=_(
586 586 b'deltas within internal storage will be fully '
587 587 b'recomputed; this will likely drastically slow down '
588 588 b'execution time'
589 589 ),
590 590 )
591 591 )
592 592
593 593 optimizations.append(
594 594 improvement(
595 595 name=b're-delta-fulladd',
596 596 type=optimisation,
597 597 description=_(
598 598 b'every revision will be re-added as if it was new '
599 599 b'content. It will go through the full storage '
600 600 b'mechanism giving extensions a chance to process it '
601 601 b'(eg. lfs). This is similar to "re-delta-all" but even '
602 602 b'slower since more logic is involved.'
603 603 ),
604 604 upgrademessage=_(
605 605 b'each revision will be added as new content to the '
606 606 b'internal storage; this will likely drastically slow '
607 607 b'down execution time, but some extensions might need '
608 608 b'it'
609 609 ),
610 610 )
611 611 )
612 612
613 613 return optimizations
614 614
615 615
616 616 def determineactions(repo, deficiencies, sourcereqs, destreqs):
617 617 """Determine upgrade actions that will be performed.
618 618
619 619 Given a list of improvements as returned by ``finddeficiencies`` and
620 620 ``findoptimizations``, determine the list of upgrade actions that
621 621 will be performed.
622 622
623 623 The role of this function is to filter improvements if needed, apply
624 624 recommended optimizations from the improvements list that make sense,
625 625 etc.
626 626
627 627 Returns a list of action names.
628 628 """
629 629 newactions = []
630 630
631 knownreqs = supporteddestrequirements(repo)
632
633 631 for d in deficiencies:
634 name = d.name
632 name = d._requirement
635 633
636 634 # If the action is a requirement that doesn't show up in the
637 635 # destination requirements, prune the action.
638 if name in knownreqs and name not in destreqs:
636 if name is not None and name not in destreqs:
639 637 continue
640 638
641 639 newactions.append(d)
642 640
643 641 # FUTURE consider adding some optimizations here for certain transitions.
644 642 # e.g. adding generaldelta could schedule parent redeltas.
645 643
646 644 return newactions
647 645
648 646
649 647 def _revlogfrompath(repo, path):
650 648 """Obtain a revlog from a repo path.
651 649
652 650 An instance of the appropriate class is returned.
653 651 """
654 652 if path == b'00changelog.i':
655 653 return changelog.changelog(repo.svfs)
656 654 elif path.endswith(b'00manifest.i'):
657 655 mandir = path[: -len(b'00manifest.i')]
658 656 return manifest.manifestrevlog(repo.svfs, tree=mandir)
659 657 else:
660 658 # reverse of "/".join(("data", path + ".i"))
661 659 return filelog.filelog(repo.svfs, path[5:-2])
662 660
663 661
664 662 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
665 663 """copy all relevant files for `oldrl` into `destrepo` store
666 664
667 665 Files are copied "as is" without any transformation. The copy is performed
668 666 without extra checks. Callers are responsible for making sure the copied
669 667 content is compatible with the format of the destination repository.
670 668 """
671 669 oldrl = getattr(oldrl, '_revlog', oldrl)
672 670 newrl = _revlogfrompath(destrepo, unencodedname)
673 671 newrl = getattr(newrl, '_revlog', newrl)
674 672
675 673 oldvfs = oldrl.opener
676 674 newvfs = newrl.opener
677 675 oldindex = oldvfs.join(oldrl.indexfile)
678 676 newindex = newvfs.join(newrl.indexfile)
679 677 olddata = oldvfs.join(oldrl.datafile)
680 678 newdata = newvfs.join(newrl.datafile)
681 679
682 680 with newvfs(newrl.indexfile, b'w'):
683 681 pass # create all the directories
684 682
685 683 util.copyfile(oldindex, newindex)
686 684 copydata = oldrl.opener.exists(oldrl.datafile)
687 685 if copydata:
688 686 util.copyfile(olddata, newdata)
689 687
690 688 if not (
691 689 unencodedname.endswith(b'00changelog.i')
692 690 or unencodedname.endswith(b'00manifest.i')
693 691 ):
694 692 destrepo.svfs.fncache.add(unencodedname)
695 693 if copydata:
696 694 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
697 695
698 696
699 697 UPGRADE_CHANGELOG = object()
700 698 UPGRADE_MANIFEST = object()
701 699 UPGRADE_FILELOG = object()
702 700
703 701 UPGRADE_ALL_REVLOGS = frozenset(
704 702 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG]
705 703 )
706 704
707 705
708 706 def getsidedatacompanion(srcrepo, dstrepo):
709 707 sidedatacompanion = None
710 708 removedreqs = srcrepo.requirements - dstrepo.requirements
711 709 addedreqs = dstrepo.requirements - srcrepo.requirements
712 710 if localrepo.SIDEDATA_REQUIREMENT in removedreqs:
713 711
714 712 def sidedatacompanion(rl, rev):
715 713 rl = getattr(rl, '_revlog', rl)
716 714 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
717 715 return True, (), {}
718 716 return False, (), {}
719 717
720 718 elif localrepo.COPIESSDC_REQUIREMENT in addedreqs:
721 719 sidedatacompanion = copies.getsidedataadder(srcrepo, dstrepo)
722 720 elif localrepo.COPIESSDC_REQUIREMENT in removedreqs:
723 721 sidedatacompanion = copies.getsidedataremover(srcrepo, dstrepo)
724 722 return sidedatacompanion
725 723
726 724
727 725 def matchrevlog(revlogfilter, entry):
728 726 """check is a revlog is selected for cloning
729 727
730 728 The store entry is checked against the passed filter"""
731 729 if entry.endswith(b'00changelog.i'):
732 730 return UPGRADE_CHANGELOG in revlogfilter
733 731 elif entry.endswith(b'00manifest.i'):
734 732 return UPGRADE_MANIFEST in revlogfilter
735 733 return UPGRADE_FILELOG in revlogfilter
736 734
737 735
738 736 def _clonerevlogs(
739 737 ui,
740 738 srcrepo,
741 739 dstrepo,
742 740 tr,
743 741 deltareuse,
744 742 forcedeltabothparents,
745 743 revlogs=UPGRADE_ALL_REVLOGS,
746 744 ):
747 745 """Copy revlogs between 2 repos."""
748 746 revcount = 0
749 747 srcsize = 0
750 748 srcrawsize = 0
751 749 dstsize = 0
752 750 fcount = 0
753 751 frevcount = 0
754 752 fsrcsize = 0
755 753 frawsize = 0
756 754 fdstsize = 0
757 755 mcount = 0
758 756 mrevcount = 0
759 757 msrcsize = 0
760 758 mrawsize = 0
761 759 mdstsize = 0
762 760 crevcount = 0
763 761 csrcsize = 0
764 762 crawsize = 0
765 763 cdstsize = 0
766 764
767 765 alldatafiles = list(srcrepo.store.walk())
768 766
769 767 # Perform a pass to collect metadata. This validates we can open all
770 768 # source files and allows a unified progress bar to be displayed.
771 769 for unencoded, encoded, size in alldatafiles:
772 770 if unencoded.endswith(b'.d'):
773 771 continue
774 772
775 773 rl = _revlogfrompath(srcrepo, unencoded)
776 774
777 775 info = rl.storageinfo(
778 776 exclusivefiles=True,
779 777 revisionscount=True,
780 778 trackedsize=True,
781 779 storedsize=True,
782 780 )
783 781
784 782 revcount += info[b'revisionscount'] or 0
785 783 datasize = info[b'storedsize'] or 0
786 784 rawsize = info[b'trackedsize'] or 0
787 785
788 786 srcsize += datasize
789 787 srcrawsize += rawsize
790 788
791 789 # This is for the separate progress bars.
792 790 if isinstance(rl, changelog.changelog):
793 791 crevcount += len(rl)
794 792 csrcsize += datasize
795 793 crawsize += rawsize
796 794 elif isinstance(rl, manifest.manifestrevlog):
797 795 mcount += 1
798 796 mrevcount += len(rl)
799 797 msrcsize += datasize
800 798 mrawsize += rawsize
801 799 elif isinstance(rl, filelog.filelog):
802 800 fcount += 1
803 801 frevcount += len(rl)
804 802 fsrcsize += datasize
805 803 frawsize += rawsize
806 804 else:
807 805 error.ProgrammingError(b'unknown revlog type')
808 806
809 807 if not revcount:
810 808 return
811 809
812 810 ui.write(
813 811 _(
814 812 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
815 813 b'%d in changelog)\n'
816 814 )
817 815 % (revcount, frevcount, mrevcount, crevcount)
818 816 )
819 817 ui.write(
820 818 _(b'migrating %s in store; %s tracked data\n')
821 819 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
822 820 )
823 821
824 822 # Used to keep track of progress.
825 823 progress = None
826 824
827 825 def oncopiedrevision(rl, rev, node):
828 826 progress.increment()
829 827
830 828 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
831 829
832 830 # Do the actual copying.
833 831 # FUTURE this operation can be farmed off to worker processes.
834 832 seen = set()
835 833 for unencoded, encoded, size in alldatafiles:
836 834 if unencoded.endswith(b'.d'):
837 835 continue
838 836
839 837 oldrl = _revlogfrompath(srcrepo, unencoded)
840 838
841 839 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
842 840 ui.write(
843 841 _(
844 842 b'finished migrating %d manifest revisions across %d '
845 843 b'manifests; change in size: %s\n'
846 844 )
847 845 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
848 846 )
849 847
850 848 ui.write(
851 849 _(
852 850 b'migrating changelog containing %d revisions '
853 851 b'(%s in store; %s tracked data)\n'
854 852 )
855 853 % (
856 854 crevcount,
857 855 util.bytecount(csrcsize),
858 856 util.bytecount(crawsize),
859 857 )
860 858 )
861 859 seen.add(b'c')
862 860 progress = srcrepo.ui.makeprogress(
863 861 _(b'changelog revisions'), total=crevcount
864 862 )
865 863 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
866 864 ui.write(
867 865 _(
868 866 b'finished migrating %d filelog revisions across %d '
869 867 b'filelogs; change in size: %s\n'
870 868 )
871 869 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
872 870 )
873 871
874 872 ui.write(
875 873 _(
876 874 b'migrating %d manifests containing %d revisions '
877 875 b'(%s in store; %s tracked data)\n'
878 876 )
879 877 % (
880 878 mcount,
881 879 mrevcount,
882 880 util.bytecount(msrcsize),
883 881 util.bytecount(mrawsize),
884 882 )
885 883 )
886 884 seen.add(b'm')
887 885 if progress:
888 886 progress.complete()
889 887 progress = srcrepo.ui.makeprogress(
890 888 _(b'manifest revisions'), total=mrevcount
891 889 )
892 890 elif b'f' not in seen:
893 891 ui.write(
894 892 _(
895 893 b'migrating %d filelogs containing %d revisions '
896 894 b'(%s in store; %s tracked data)\n'
897 895 )
898 896 % (
899 897 fcount,
900 898 frevcount,
901 899 util.bytecount(fsrcsize),
902 900 util.bytecount(frawsize),
903 901 )
904 902 )
905 903 seen.add(b'f')
906 904 if progress:
907 905 progress.complete()
908 906 progress = srcrepo.ui.makeprogress(
909 907 _(b'file revisions'), total=frevcount
910 908 )
911 909
912 910 if matchrevlog(revlogs, unencoded):
913 911 ui.note(
914 912 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
915 913 )
916 914 newrl = _revlogfrompath(dstrepo, unencoded)
917 915 oldrl.clone(
918 916 tr,
919 917 newrl,
920 918 addrevisioncb=oncopiedrevision,
921 919 deltareuse=deltareuse,
922 920 forcedeltabothparents=forcedeltabothparents,
923 921 sidedatacompanion=sidedatacompanion,
924 922 )
925 923 else:
926 924 msg = _(b'blindly copying %s containing %i revisions\n')
927 925 ui.note(msg % (unencoded, len(oldrl)))
928 926 _copyrevlog(tr, dstrepo, oldrl, unencoded)
929 927
930 928 newrl = _revlogfrompath(dstrepo, unencoded)
931 929
932 930 info = newrl.storageinfo(storedsize=True)
933 931 datasize = info[b'storedsize'] or 0
934 932
935 933 dstsize += datasize
936 934
937 935 if isinstance(newrl, changelog.changelog):
938 936 cdstsize += datasize
939 937 elif isinstance(newrl, manifest.manifestrevlog):
940 938 mdstsize += datasize
941 939 else:
942 940 fdstsize += datasize
943 941
944 942 progress.complete()
945 943
946 944 ui.write(
947 945 _(
948 946 b'finished migrating %d changelog revisions; change in size: '
949 947 b'%s\n'
950 948 )
951 949 % (crevcount, util.bytecount(cdstsize - csrcsize))
952 950 )
953 951
954 952 ui.write(
955 953 _(
956 954 b'finished migrating %d total revisions; total change in store '
957 955 b'size: %s\n'
958 956 )
959 957 % (revcount, util.bytecount(dstsize - srcsize))
960 958 )
961 959
962 960
963 961 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
964 962 """Determine whether to copy a store file during upgrade.
965 963
966 964 This function is called when migrating store files from ``srcrepo`` to
967 965 ``dstrepo`` as part of upgrading a repository.
968 966
969 967 Args:
970 968 srcrepo: repo we are copying from
971 969 dstrepo: repo we are copying to
972 970 requirements: set of requirements for ``dstrepo``
973 971 path: store file being examined
974 972 mode: the ``ST_MODE`` file type of ``path``
975 973 st: ``stat`` data structure for ``path``
976 974
977 975 Function should return ``True`` if the file is to be copied.
978 976 """
979 977 # Skip revlogs.
980 978 if path.endswith((b'.i', b'.d')):
981 979 return False
982 980 # Skip transaction related files.
983 981 if path.startswith(b'undo'):
984 982 return False
985 983 # Only copy regular files.
986 984 if mode != stat.S_IFREG:
987 985 return False
988 986 # Skip other skipped files.
989 987 if path in (b'lock', b'fncache'):
990 988 return False
991 989
992 990 return True
993 991
994 992
995 993 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
996 994 """Hook point for extensions to perform additional actions during upgrade.
997 995
998 996 This function is called after revlogs and store files have been copied but
999 997 before the new store is swapped into the original location.
1000 998 """
1001 999
1002 1000
1003 1001 def _upgraderepo(
1004 1002 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1005 1003 ):
1006 1004 """Do the low-level work of upgrading a repository.
1007 1005
1008 1006 The upgrade is effectively performed as a copy between a source
1009 1007 repository and a temporary destination repository.
1010 1008
1011 1009 The source repository is unmodified for as long as possible so the
1012 1010 upgrade can abort at any time without causing loss of service for
1013 1011 readers and without corrupting the source repository.
1014 1012 """
1015 1013 assert srcrepo.currentwlock()
1016 1014 assert dstrepo.currentwlock()
1017 1015
1018 1016 ui.write(
1019 1017 _(
1020 1018 b'(it is safe to interrupt this process any time before '
1021 1019 b'data migration completes)\n'
1022 1020 )
1023 1021 )
1024 1022
1025 1023 if b're-delta-all' in actions:
1026 1024 deltareuse = revlog.revlog.DELTAREUSENEVER
1027 1025 elif b're-delta-parent' in actions:
1028 1026 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1029 1027 elif b're-delta-multibase' in actions:
1030 1028 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1031 1029 elif b're-delta-fulladd' in actions:
1032 1030 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1033 1031 else:
1034 1032 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1035 1033
1036 1034 with dstrepo.transaction(b'upgrade') as tr:
1037 1035 _clonerevlogs(
1038 1036 ui,
1039 1037 srcrepo,
1040 1038 dstrepo,
1041 1039 tr,
1042 1040 deltareuse,
1043 1041 b're-delta-multibase' in actions,
1044 1042 revlogs=revlogs,
1045 1043 )
1046 1044
1047 1045 # Now copy other files in the store directory.
1048 1046 # The sorted() makes execution deterministic.
1049 1047 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1050 1048 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1051 1049 continue
1052 1050
1053 1051 srcrepo.ui.write(_(b'copying %s\n') % p)
1054 1052 src = srcrepo.store.rawvfs.join(p)
1055 1053 dst = dstrepo.store.rawvfs.join(p)
1056 1054 util.copyfile(src, dst, copystat=True)
1057 1055
1058 1056 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1059 1057
1060 1058 ui.write(_(b'data fully migrated to temporary repository\n'))
1061 1059
1062 1060 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1063 1061 backupvfs = vfsmod.vfs(backuppath)
1064 1062
1065 1063 # Make a backup of requires file first, as it is the first to be modified.
1066 1064 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1067 1065
1068 1066 # We install an arbitrary requirement that clients must not support
1069 1067 # as a mechanism to lock out new clients during the data swap. This is
1070 1068 # better than allowing a client to continue while the repository is in
1071 1069 # an inconsistent state.
1072 1070 ui.write(
1073 1071 _(
1074 1072 b'marking source repository as being upgraded; clients will be '
1075 1073 b'unable to read from repository\n'
1076 1074 )
1077 1075 )
1078 1076 scmutil.writerequires(
1079 1077 srcrepo.vfs, srcrepo.requirements | {b'upgradeinprogress'}
1080 1078 )
1081 1079
1082 1080 ui.write(_(b'starting in-place swap of repository data\n'))
1083 1081 ui.write(_(b'replaced files will be backed up at %s\n') % backuppath)
1084 1082
1085 1083 # Now swap in the new store directory. Doing it as a rename should make
1086 1084 # the operation nearly instantaneous and atomic (at least in well-behaved
1087 1085 # environments).
1088 1086 ui.write(_(b'replacing store...\n'))
1089 1087 tstart = util.timer()
1090 1088 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1091 1089 util.rename(dstrepo.spath, srcrepo.spath)
1092 1090 elapsed = util.timer() - tstart
1093 1091 ui.write(
1094 1092 _(
1095 1093 b'store replacement complete; repository was inconsistent for '
1096 1094 b'%0.1fs\n'
1097 1095 )
1098 1096 % elapsed
1099 1097 )
1100 1098
1101 1099 # We first write the requirements file. Any new requirements will lock
1102 1100 # out legacy clients.
1103 1101 ui.write(
1104 1102 _(
1105 1103 b'finalizing requirements file and making repository readable '
1106 1104 b'again\n'
1107 1105 )
1108 1106 )
1109 1107 scmutil.writerequires(srcrepo.vfs, requirements)
1110 1108
1111 1109 # The lock file from the old store won't be removed because nothing has a
1112 1110 # reference to its new location. So clean it up manually. Alternatively, we
1113 1111 # could update srcrepo.svfs and other variables to point to the new
1114 1112 # location. This is simpler.
1115 1113 backupvfs.unlink(b'store/lock')
1116 1114
1117 1115 return backuppath
1118 1116
1119 1117
1120 1118 def upgraderepo(
1121 1119 ui,
1122 1120 repo,
1123 1121 run=False,
1124 1122 optimize=None,
1125 1123 backup=True,
1126 1124 manifest=None,
1127 1125 changelog=None,
1128 1126 ):
1129 1127 """Upgrade a repository in place."""
1130 1128 if optimize is None:
1131 1129 optimize = []
1132 1130 optimize = {legacy_opts_map.get(o, o) for o in optimize}
1133 1131 repo = repo.unfiltered()
1134 1132
1135 1133 revlogs = set(UPGRADE_ALL_REVLOGS)
1136 1134 specentries = ((b'c', changelog), (b'm', manifest))
1137 1135 specified = [(y, x) for (y, x) in specentries if x is not None]
1138 1136 if specified:
1139 1137 # we have some limitations on the revlogs to be recloned
1140 1138 if any(x for y, x in specified):
1141 1139 revlogs = set()
1142 1140 for r, enabled in specified:
1143 1141 if enabled:
1144 1142 if r == b'c':
1145 1143 revlogs.add(UPGRADE_CHANGELOG)
1146 1144 elif r == b'm':
1147 1145 revlogs.add(UPGRADE_MANIFEST)
1148 1146 else:
1149 1147 # none are enabled
1150 1148 for r, __ in specified:
1151 1149 if r == b'c':
1152 1150 revlogs.discard(UPGRADE_CHANGELOG)
1153 1151 elif r == b'm':
1154 1152 revlogs.discard(UPGRADE_MANIFEST)
1155 1153
1156 1154 # Ensure the repository can be upgraded.
1157 1155 missingreqs = requiredsourcerequirements(repo) - repo.requirements
1158 1156 if missingreqs:
1159 1157 raise error.Abort(
1160 1158 _(b'cannot upgrade repository; requirement missing: %s')
1161 1159 % _(b', ').join(sorted(missingreqs))
1162 1160 )
1163 1161
1164 1162 blockedreqs = blocksourcerequirements(repo) & repo.requirements
1165 1163 if blockedreqs:
1166 1164 raise error.Abort(
1167 1165 _(
1168 1166 b'cannot upgrade repository; unsupported source '
1169 1167 b'requirement: %s'
1170 1168 )
1171 1169 % _(b', ').join(sorted(blockedreqs))
1172 1170 )
1173 1171
1174 1172 # FUTURE there is potentially a need to control the wanted requirements via
1175 1173 # command arguments or via an extension hook point.
1176 1174 newreqs = localrepo.newreporequirements(
1177 1175 repo.ui, localrepo.defaultcreateopts(repo.ui)
1178 1176 )
1179 1177 newreqs.update(preservedrequirements(repo))
1180 1178
1181 1179 noremovereqs = (
1182 1180 repo.requirements - newreqs - supportremovedrequirements(repo)
1183 1181 )
1184 1182 if noremovereqs:
1185 1183 raise error.Abort(
1186 1184 _(
1187 1185 b'cannot upgrade repository; requirement would be '
1188 1186 b'removed: %s'
1189 1187 )
1190 1188 % _(b', ').join(sorted(noremovereqs))
1191 1189 )
1192 1190
1193 1191 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
1194 1192 if noaddreqs:
1195 1193 raise error.Abort(
1196 1194 _(
1197 1195 b'cannot upgrade repository; do not support adding '
1198 1196 b'requirement: %s'
1199 1197 )
1200 1198 % _(b', ').join(sorted(noaddreqs))
1201 1199 )
1202 1200
1203 1201 unsupportedreqs = newreqs - supporteddestrequirements(repo)
1204 1202 if unsupportedreqs:
1205 1203 raise error.Abort(
1206 1204 _(
1207 1205 b'cannot upgrade repository; do not support '
1208 1206 b'destination requirement: %s'
1209 1207 )
1210 1208 % _(b', ').join(sorted(unsupportedreqs))
1211 1209 )
1212 1210
1213 1211 # Find and validate all improvements that can be made.
1214 1212 alloptimizations = findoptimizations(repo)
1215 1213
1216 1214 # Apply and Validate arguments.
1217 1215 optimizations = []
1218 1216 for o in alloptimizations:
1219 1217 if o.name in optimize:
1220 1218 optimizations.append(o)
1221 1219 optimize.discard(o.name)
1222 1220
1223 1221 if optimize: # anything left is unknown
1224 1222 raise error.Abort(
1225 1223 _(b'unknown optimization action requested: %s')
1226 1224 % b', '.join(sorted(optimize)),
1227 1225 hint=_(b'run without arguments to see valid optimizations'),
1228 1226 )
1229 1227
1230 1228 deficiencies = finddeficiencies(repo)
1231 1229 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
1232 1230 actions.extend(
1233 1231 o
1234 1232 for o in sorted(optimizations)
1235 1233 # determineactions could have added optimisations
1236 1234 if o not in actions
1237 1235 )
1238 1236
1239 1237 removedreqs = repo.requirements - newreqs
1240 1238 addedreqs = newreqs - repo.requirements
1241 1239
1242 1240 if revlogs != UPGRADE_ALL_REVLOGS:
1243 1241 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
1244 1242 if incompatible:
1245 1243 msg = _(
1246 1244 b'ignoring revlogs selection flags, format requirements '
1247 1245 b'change: %s\n'
1248 1246 )
1249 1247 ui.warn(msg % b', '.join(sorted(incompatible)))
1250 1248 revlogs = UPGRADE_ALL_REVLOGS
1251 1249
1252 1250 def write_labeled(l, label):
1253 1251 first = True
1254 1252 for r in sorted(l):
1255 1253 if not first:
1256 1254 ui.write(b', ')
1257 1255 ui.write(r, label=label)
1258 1256 first = False
1259 1257
1260 1258 def printrequirements():
1261 1259 ui.write(_(b'requirements\n'))
1262 1260 ui.write(_(b' preserved: '))
1263 1261 write_labeled(
1264 1262 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
1265 1263 )
1266 1264 ui.write((b'\n'))
1267 1265 removed = repo.requirements - newreqs
1268 1266 if repo.requirements - newreqs:
1269 1267 ui.write(_(b' removed: '))
1270 1268 write_labeled(removed, "upgrade-repo.requirement.removed")
1271 1269 ui.write((b'\n'))
1272 1270 added = newreqs - repo.requirements
1273 1271 if added:
1274 1272 ui.write(_(b' added: '))
1275 1273 write_labeled(added, "upgrade-repo.requirement.added")
1276 1274 ui.write((b'\n'))
1277 1275 ui.write(b'\n')
1278 1276
1279 1277 def printupgradeactions():
1280 1278 for a in actions:
1281 1279 ui.write(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
1282 1280
1283 1281 if not run:
1284 1282 fromconfig = []
1285 1283 onlydefault = []
1286 1284
1287 1285 for d in deficiencies:
1288 1286 if d.fromconfig(repo):
1289 1287 fromconfig.append(d)
1290 1288 elif d.default:
1291 1289 onlydefault.append(d)
1292 1290
1293 1291 if fromconfig or onlydefault:
1294 1292
1295 1293 if fromconfig:
1296 1294 ui.write(
1297 1295 _(
1298 1296 b'repository lacks features recommended by '
1299 1297 b'current config options:\n\n'
1300 1298 )
1301 1299 )
1302 1300 for i in fromconfig:
1303 1301 ui.write(b'%s\n %s\n\n' % (i.name, i.description))
1304 1302
1305 1303 if onlydefault:
1306 1304 ui.write(
1307 1305 _(
1308 1306 b'repository lacks features used by the default '
1309 1307 b'config options:\n\n'
1310 1308 )
1311 1309 )
1312 1310 for i in onlydefault:
1313 1311 ui.write(b'%s\n %s\n\n' % (i.name, i.description))
1314 1312
1315 1313 ui.write(b'\n')
1316 1314 else:
1317 1315 ui.write(
1318 1316 _(
1319 1317 b'(no feature deficiencies found in existing '
1320 1318 b'repository)\n'
1321 1319 )
1322 1320 )
1323 1321
1324 1322 ui.write(
1325 1323 _(
1326 1324 b'performing an upgrade with "--run" will make the following '
1327 1325 b'changes:\n\n'
1328 1326 )
1329 1327 )
1330 1328
1331 1329 printrequirements()
1332 1330 printupgradeactions()
1333 1331
1334 1332 unusedoptimize = [i for i in alloptimizations if i not in actions]
1335 1333
1336 1334 if unusedoptimize:
1337 1335 ui.write(
1338 1336 _(
1339 1337 b'additional optimizations are available by specifying '
1340 1338 b'"--optimize <name>":\n\n'
1341 1339 )
1342 1340 )
1343 1341 for i in unusedoptimize:
1344 1342 ui.write(_(b'%s\n %s\n\n') % (i.name, i.description))
1345 1343 return
1346 1344
1347 1345 # Else we're in the run=true case.
1348 1346 ui.write(_(b'upgrade will perform the following actions:\n\n'))
1349 1347 printrequirements()
1350 1348 printupgradeactions()
1351 1349
1352 1350 upgradeactions = [a.name for a in actions]
1353 1351
1354 1352 ui.write(_(b'beginning upgrade...\n'))
1355 1353 with repo.wlock(), repo.lock():
1356 1354 ui.write(_(b'repository locked and read-only\n'))
1357 1355 # Our strategy for upgrading the repository is to create a new,
1358 1356 # temporary repository, write data to it, then do a swap of the
1359 1357 # data. There are less heavyweight ways to do this, but it is easier
1360 1358 # to create a new repo object than to instantiate all the components
1361 1359 # (like the store) separately.
1362 1360 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
1363 1361 backuppath = None
1364 1362 try:
1365 1363 ui.write(
1366 1364 _(
1367 1365 b'creating temporary repository to stage migrated '
1368 1366 b'data: %s\n'
1369 1367 )
1370 1368 % tmppath
1371 1369 )
1372 1370
1373 1371 # clone ui without using ui.copy because repo.ui is protected
1374 1372 repoui = repo.ui.__class__(repo.ui)
1375 1373 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1376 1374
1377 1375 with dstrepo.wlock(), dstrepo.lock():
1378 1376 backuppath = _upgraderepo(
1379 1377 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
1380 1378 )
1381 1379 if not (backup or backuppath is None):
1382 1380 ui.write(_(b'removing old repository content%s\n') % backuppath)
1383 1381 repo.vfs.rmtree(backuppath, forcibly=True)
1384 1382 backuppath = None
1385 1383
1386 1384 finally:
1387 1385 ui.write(_(b'removing temporary repository %s\n') % tmppath)
1388 1386 repo.vfs.rmtree(tmppath, forcibly=True)
1389 1387
1390 1388 if backuppath:
1391 1389 ui.warn(
1392 1390 _(b'copy of old repository backed up at %s\n') % backuppath
1393 1391 )
1394 1392 ui.warn(
1395 1393 _(
1396 1394 b'the old repository will not be deleted; remove '
1397 1395 b'it to free up disk space once the upgraded '
1398 1396 b'repository is verified\n'
1399 1397 )
1400 1398 )
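
The behavioural change in this file is concentrated in ``determineactions``: the old code pruned an action only when the variant's *name* was both a supported destination requirement and absent from ``destreqs``, while the new code checks the requirement that actually backs the format variant (``d._requirement``) and prunes the action whenever that requirement is not part of the planned destination requirements. Below is a minimal, self-contained sketch of the new filtering rule; the ``_FakeVariant`` class and the requirement strings are hypothetical stand-ins for illustration, not the real ``upgrade.py`` objects.

# Minimal sketch of the new filtering in determineactions() (illustrative
# only; the real deficiency objects are formatvariant subclasses).
class _FakeVariant(object):
    def __init__(self, name, requirement):
        self.name = name
        # requirement backing this variant; None for variants that are not
        # requirement-driven
        self._requirement = requirement


def _filterdeficiencies(deficiencies, destreqs):
    newactions = []
    for d in deficiencies:
        name = d._requirement
        # the backing requirement is not planned for the destination, so
        # there is no work to schedule for this deficiency
        if name is not None and name not in destreqs:
            continue
        newactions.append(d)
    return newactions


# hypothetical requirement strings, chosen for illustration only
sidedata = _FakeVariant(b'sidedata', b'exp-sidedata-flag')
sparse = _FakeVariant(b'sparserevlog', b'sparserevlog')
destreqs = {b'revlogv1', b'store', b'generaldelta', b'sparserevlog'}

# 'sidedata' is dropped (its requirement is not planned); 'sparserevlog' kept
print([d.name for d in _filterdeficiencies([sidedata, sparse], destreqs)])
# -> [b'sparserevlog']

This pruning is why the ``sidedata`` and ``copies-sdc`` entries disappear from the expected ``debugupgraderepo`` output in the test diff that follows: their requirements are not part of the planned destination requirements, so no action is reported for them.
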
@@ -1,721 +1,715
1 1 #testcases lfsremote-on lfsremote-off
2 2 #require serve no-reposimplestore no-chg
3 3
4 4 This test splits `hg serve` with and without using the extension into separate
5 5 test cases. The tests are broken down as follows, where "LFS"/"No-LFS"
6 6 indicates whether or not there are commits that use an LFS file, and "D"/"E"
7 7 indicates whether or not the extension is loaded. The "X" cases are not tested
8 8 individually, because the lfs requirement causes the process to bail early if
9 9 the extension is disabled.
10 10
11 11 . Server
12 12 .
13 13 . No-LFS LFS
14 14 . +----------------------------+
15 15 . | || D | E | D | E |
16 16 . |---++=======================|
17 17 . C | D || N/A | #1 | X | #4 |
18 18 . l No +---++-----------------------|
19 19 . i LFS | E || #2 | #2 | X | #5 |
20 20 . e +---++-----------------------|
21 21 . n | D || X | X | X | X |
22 22 . t LFS |---++-----------------------|
23 23 . | E || #3 | #3 | X | #6 |
24 24 . |---++-----------------------+
25 25
26 26 make command server magic visible
27 27
28 28 #if windows
29 29 $ PYTHONPATH="$TESTDIR/../contrib;$PYTHONPATH"
30 30 #else
31 31 $ PYTHONPATH="$TESTDIR/../contrib:$PYTHONPATH"
32 32 #endif
33 33 $ export PYTHONPATH
34 34
35 35 $ hg init server
36 36 $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
37 37
38 38 $ cat > $TESTTMP/debugprocessors.py <<EOF
39 39 > from mercurial import (
40 40 > cmdutil,
41 41 > commands,
42 42 > pycompat,
43 43 > registrar,
44 44 > )
45 45 > cmdtable = {}
46 46 > command = registrar.command(cmdtable)
47 47 > @command(b'debugprocessors', [], b'FILE')
48 48 > def debugprocessors(ui, repo, file_=None, **opts):
49 49 > opts = pycompat.byteskwargs(opts)
50 50 > opts[b'changelog'] = False
51 51 > opts[b'manifest'] = False
52 52 > opts[b'dir'] = False
53 53 > rl = cmdutil.openrevlog(repo, b'debugprocessors', file_, opts)
54 54 > for flag, proc in rl._flagprocessors.items():
55 55 > ui.status(b"registered processor '%#x'\n" % (flag))
56 56 > EOF
57 57
58 58 Skip the experimental.changegroup3=True config. Failure to agree on this comes
59 59 first, and causes an "abort: no common changegroup version" if the extension is
60 60 only loaded on one side. If that *is* enabled, the subsequent failure is "abort:
61 61 missing processor for flag '0x2000'!" if the extension is only loaded on one side
62 62 (possibly also masked by the Internal Server Error message).
63 63 $ cat >> $HGRCPATH <<EOF
64 64 > [extensions]
65 65 > debugprocessors = $TESTTMP/debugprocessors.py
66 66 > [experimental]
67 67 > lfs.disableusercache = True
68 68 > lfs.worker-enable = False
69 69 > [lfs]
70 70 > threshold=10
71 71 > [web]
72 72 > allow_push=*
73 73 > push_ssl=False
74 74 > EOF
75 75
76 76 $ cp $HGRCPATH $HGRCPATH.orig
77 77
78 78 #if lfsremote-on
79 79 $ hg --config extensions.lfs= -R server \
80 80 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
81 81 #else
82 82 $ hg --config extensions.lfs=! -R server \
83 83 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
84 84 #endif
85 85
86 86 $ cat hg.pid >> $DAEMON_PIDS
87 87 $ hg clone -q http://localhost:$HGPORT client
88 88 $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
89 89 [1]
90 90
91 91 This trivial repo will force commandserver to load the extension, but not call
92 92 reposetup() on another repo actually being operated on. This gives coverage
93 93 that wrapper functions are not assuming reposetup() was called.
94 94
95 95 $ hg init $TESTTMP/cmdservelfs
96 96 $ cat >> $TESTTMP/cmdservelfs/.hg/hgrc << EOF
97 97 > [extensions]
98 98 > lfs =
99 99 > EOF
100 100
101 101 --------------------------------------------------------------------------------
102 102 Case #1: client with non-lfs content and the extension disabled; server with
103 103 non-lfs content, and the extension enabled.
104 104
105 105 $ cd client
106 106 $ echo 'non-lfs' > nonlfs.txt
107 107 >>> from __future__ import absolute_import
108 108 >>> from hgclient import check, readchannel, runcommand
109 109 >>> @check
110 110 ... def diff(server):
111 111 ... readchannel(server)
112 112 ... # run an arbitrary command in the repo with the extension loaded
113 113 ... runcommand(server, [b'id', b'-R', b'../cmdservelfs'])
114 114 ... # now run a command in a repo without the extension to ensure that
115 115 ... # files are added safely..
116 116 ... runcommand(server, [b'ci', b'-Aqm', b'non-lfs'])
117 117 ... # .. and that scmutil.prefetchfiles() safely no-ops..
118 118 ... runcommand(server, [b'diff', b'-r', b'.~1'])
119 119 ... # .. and that debugupgraderepo safely no-ops.
120 120 ... runcommand(server, [b'debugupgraderepo', b'-q', b'--run'])
121 121 *** runcommand id -R ../cmdservelfs
122 122 000000000000 tip
123 123 *** runcommand ci -Aqm non-lfs
124 124 *** runcommand diff -r .~1
125 125 diff -r 000000000000 nonlfs.txt
126 126 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
127 127 +++ b/nonlfs.txt Thu Jan 01 00:00:00 1970 +0000
128 128 @@ -0,0 +1,1 @@
129 129 +non-lfs
130 130 *** runcommand debugupgraderepo -q --run
131 131 upgrade will perform the following actions:
132 132
133 133 requirements
134 134 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
135 135
136 sidedata
137 Allows storage of extra data alongside a revision.
138
139 copies-sdc
140 Allows to use more efficient algorithm to deal with copy tracing.
141
142 136 beginning upgrade...
143 137 repository locked and read-only
144 138 creating temporary repository to stage migrated data: * (glob)
145 139 (it is safe to interrupt this process any time before data migration completes)
146 140 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
147 141 migrating 324 bytes in store; 129 bytes tracked data
148 142 migrating 1 filelogs containing 1 revisions (73 bytes in store; 8 bytes tracked data)
149 143 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
150 144 migrating 1 manifests containing 1 revisions (117 bytes in store; 52 bytes tracked data)
151 145 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
152 146 migrating changelog containing 1 revisions (134 bytes in store; 69 bytes tracked data)
153 147 finished migrating 1 changelog revisions; change in size: 0 bytes
154 148 finished migrating 3 total revisions; total change in store size: 0 bytes
155 149 copying phaseroots
156 150 data fully migrated to temporary repository
157 151 marking source repository as being upgraded; clients will be unable to read from repository
158 152 starting in-place swap of repository data
159 153 replaced files will be backed up at * (glob)
160 154 replacing store...
161 155 store replacement complete; repository was inconsistent for *s (glob)
162 156 finalizing requirements file and making repository readable again
163 157 removing temporary repository * (glob)
164 158 copy of old repository backed up at * (glob)
165 159 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
166 160
167 161 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
168 162 [1]
169 163
170 164 #if lfsremote-on
171 165
172 166 $ hg push -q
173 167 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
174 168 [1]
175 169
176 170 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
177 171 $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
178 172 [1]
179 173
180 174 $ hg init $TESTTMP/client1_pull
181 175 $ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
182 176 $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
183 177 [1]
184 178
185 179 $ hg identify http://localhost:$HGPORT
186 180 d437e1d24fbd
187 181
188 182 #endif
189 183
190 184 --------------------------------------------------------------------------------
191 185 Case #2: client with non-lfs content and the extension enabled; server with
192 186 non-lfs content, and the extension state controlled by #testcases.
193 187
194 188 $ cat >> $HGRCPATH <<EOF
195 189 > [extensions]
196 190 > lfs =
197 191 > EOF
198 192 $ echo 'non-lfs' > nonlfs2.txt
199 193 $ hg ci -Aqm 'non-lfs file with lfs client'
200 194
201 195 Since no lfs content has been added yet, the push is allowed, even when the
202 196 extension is not enabled remotely.
203 197
204 198 $ hg push -q
205 199 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
206 200 [1]
207 201
208 202 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
209 203 $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
210 204 [1]
211 205
212 206 $ hg init $TESTTMP/client2_pull
213 207 $ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
214 208 $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
215 209 [1]
216 210
217 211 $ hg identify http://localhost:$HGPORT
218 212 1477875038c6
219 213
220 214 --------------------------------------------------------------------------------
221 215 Case #3: client with lfs content and the extension enabled; server with
222 216 non-lfs content, and the extension state controlled by #testcases. The server
223 217 should have an 'lfs' requirement after it picks up its first commit with a blob.
224 218
225 219 $ echo 'this is a big lfs file' > lfs.bin
226 220 $ hg ci -Aqm 'lfs'
227 221 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
228 222 .hg/requires:lfs
229 223
230 224 #if lfsremote-off
231 225 $ hg push -q
232 226 abort: required features are not supported in the destination: lfs
233 227 (enable the lfs extension on the server)
234 228 [255]
235 229 #else
236 230 $ hg push -q
237 231 #endif
238 232 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
239 233 .hg/requires:lfs
240 234 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
241 235
242 236 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
243 237 $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
244 238 $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
245 239 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
246 240
247 241 $ hg init $TESTTMP/client3_pull
248 242 $ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
249 243 $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
250 244 $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
251 245 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
252 246
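The server picks up the 'lfs' requirement because the extension watches incoming commits and changegroups for revisions carrying the external-storage flag (the 0x2000 bit seen later in this test) and records the requirement the first time one appears. The sketch below only illustrates that idea; it is not the actual hgext.lfs hook, and the helper and accessor names are assumptions.

    REVIDX_EXTSTORED = 0x2000  # flag bit used for externally stored (lfs) revisions

    def checkrequireslfs_sketch(repo, added):
        # added: iterable of (filelog, rev) pairs introduced by the transaction
        if b'lfs' in repo.requirements:
            return  # already recorded; safe to run the hook repeatedly
        for flog, rev in added:
            if flog.flags(rev) & REVIDX_EXTSTORED:
                repo.requirements.add(b'lfs')
                repo._writerequirements()  # assumed persistence helper
                break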
253 247 Test that the commit/changegroup requirement check hook can be run multiple
254 248 times.
255 249
256 250 $ hg clone -qr 0 http://localhost:$HGPORT $TESTTMP/cmdserve_client3
257 251
258 252 $ cd ../cmdserve_client3
259 253
260 254 >>> from __future__ import absolute_import
261 255 >>> from hgclient import check, readchannel, runcommand
262 256 >>> @check
263 257 ... def addrequirement(server):
264 258 ... readchannel(server)
265 259 ... # change the repo in a way that adds the lfs requirement
266 260 ... runcommand(server, [b'pull', b'-qu'])
267 261 ... # Now cause the requirement adding hook to fire again, without going
268 262 ... # through reposetup() again.
269 263 ... with open('file.txt', 'wb') as fp:
270 264 ... fp.write(b'data')
271 265 ... runcommand(server, [b'ci', b'-Aqm', b'non-lfs'])
272 266 *** runcommand pull -qu
273 267 *** runcommand ci -Aqm non-lfs
274 268
275 269 $ cd ../client
276 270
277 271 The difference here is that the push failed above when the extension isn't
278 272 enabled on the server.
279 273 $ hg identify http://localhost:$HGPORT
280 274 8374dc4052cb (lfsremote-on !)
281 275 1477875038c6 (lfsremote-off !)
282 276
283 277 Don't bother testing the lfsremote-off cases: the server won't be able
284 278 to launch if there's lfs content and the extension is disabled.
285 279
286 280 #if lfsremote-on
287 281
288 282 --------------------------------------------------------------------------------
289 283 Case #4: client with non-lfs content and the extension disabled; server with
290 284 lfs content, and the extension enabled.
291 285
292 286 $ cat >> $HGRCPATH <<EOF
293 287 > [extensions]
294 288 > lfs = !
295 289 > EOF
296 290
297 291 $ hg init $TESTTMP/client4
298 292 $ cd $TESTTMP/client4
299 293 $ cat >> .hg/hgrc <<EOF
300 294 > [paths]
301 295 > default = http://localhost:$HGPORT
302 296 > EOF
303 297 $ echo 'non-lfs' > nonlfs2.txt
304 298 $ hg ci -Aqm 'non-lfs'
305 299 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
306 300 $TESTTMP/server/.hg/requires:lfs
307 301
308 302 $ hg push -q --force
309 303 warning: repository is unrelated
310 304 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
311 305 $TESTTMP/server/.hg/requires:lfs
312 306
313 307 $ hg clone http://localhost:$HGPORT $TESTTMP/client4_clone
314 308 (remote is using large file support (lfs), but it is explicitly disabled in the local configuration)
315 309 abort: repository requires features unknown to this Mercurial: lfs!
316 310 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
317 311 [255]
318 312 $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
319 313 grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
320 314 $TESTTMP/server/.hg/requires:lfs
321 315 [2]
322 316
323 317 TODO: fail more gracefully.
324 318
325 319 $ hg init $TESTTMP/client4_pull
326 320 $ hg -R $TESTTMP/client4_pull pull http://localhost:$HGPORT
327 321 pulling from http://localhost:$HGPORT/
328 322 requesting all changes
329 323 remote: abort: no common changegroup version
330 324 abort: pull failed on remote
331 325 [255]
332 326 $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
333 327 $TESTTMP/server/.hg/requires:lfs
334 328
335 329 $ hg identify http://localhost:$HGPORT
336 330 03b080fa9d93
337 331
338 332 --------------------------------------------------------------------------------
339 333 Case #5: client with non-lfs content and the extension enabled; server with
340 334 lfs content, and the extension enabled.
341 335
342 336 $ cat >> $HGRCPATH <<EOF
343 337 > [extensions]
344 338 > lfs =
345 339 > EOF
346 340 $ echo 'non-lfs' > nonlfs3.txt
347 341 $ hg ci -Aqm 'non-lfs file with lfs client'
348 342
349 343 $ hg push -q
350 344 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
351 345 $TESTTMP/server/.hg/requires:lfs
352 346
353 347 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
354 348 $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
355 349 $TESTTMP/client5_clone/.hg/requires:lfs
356 350 $TESTTMP/server/.hg/requires:lfs
357 351
358 352 $ hg init $TESTTMP/client5_pull
359 353 $ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
360 354 $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
361 355 $TESTTMP/client5_pull/.hg/requires:lfs
362 356 $TESTTMP/server/.hg/requires:lfs
363 357
364 358 $ hg identify http://localhost:$HGPORT
365 359 c729025cc5e3
366 360
367 361 $ mv $HGRCPATH $HGRCPATH.tmp
368 362 $ cp $HGRCPATH.orig $HGRCPATH
369 363
370 364 >>> from __future__ import absolute_import
371 365 >>> from hgclient import bprint, check, readchannel, runcommand, stdout
372 366 >>> @check
373 367 ... def checkflags(server):
374 368 ... readchannel(server)
375 369 ... bprint(b'')
376 370 ... bprint(b'# LFS required- both lfs and non-lfs revlogs have 0x2000 flag')
377 371 ... stdout.flush()
378 372 ... runcommand(server, [b'debugprocessors', b'lfs.bin', b'-R',
379 373 ... b'../server'])
380 374 ... runcommand(server, [b'debugprocessors', b'nonlfs2.txt', b'-R',
381 375 ... b'../server'])
382 376 ... runcommand(server, [b'config', b'extensions', b'--cwd',
383 377 ... b'../server'])
384 378 ...
385 379 ... bprint(b"\n# LFS not enabled- revlogs don't have 0x2000 flag")
386 380 ... stdout.flush()
387 381 ... runcommand(server, [b'debugprocessors', b'nonlfs3.txt'])
388 382 ... runcommand(server, [b'config', b'extensions'])
389 383
390 384 # LFS required- both lfs and non-lfs revlogs have 0x2000 flag
391 385 *** runcommand debugprocessors lfs.bin -R ../server
392 386 registered processor '0x8000'
393 387 registered processor '0x2000'
394 388 *** runcommand debugprocessors nonlfs2.txt -R ../server
395 389 registered processor '0x8000'
396 390 registered processor '0x2000'
397 391 *** runcommand config extensions --cwd ../server
398 392 extensions.debugprocessors=$TESTTMP/debugprocessors.py
399 393 extensions.lfs=
400 394
401 395 # LFS not enabled- revlogs don't have 0x2000 flag
402 396 *** runcommand debugprocessors nonlfs3.txt
403 397 registered processor '0x8000'
404 398 *** runcommand config extensions
405 399 extensions.debugprocessors=$TESTTMP/debugprocessors.py
406 400
407 401 $ rm $HGRCPATH
408 402 $ mv $HGRCPATH.tmp $HGRCPATH
409 403
410 404 $ hg clone $TESTTMP/client $TESTTMP/nonlfs -qr 0 --config extensions.lfs=
411 405 $ cat >> $TESTTMP/nonlfs/.hg/hgrc <<EOF
412 406 > [extensions]
413 407 > lfs = !
414 408 > EOF
415 409
416 410 >>> from __future__ import absolute_import, print_function
417 411 >>> from hgclient import bprint, check, readchannel, runcommand, stdout
418 412 >>> @check
419 413 ... def checkflags2(server):
420 414 ... readchannel(server)
421 415 ... bprint(b'')
422 416 ... bprint(b'# LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag')
423 417 ... stdout.flush()
424 418 ... runcommand(server, [b'debugprocessors', b'lfs.bin', b'-R',
425 419 ... b'../server'])
426 420 ... runcommand(server, [b'debugprocessors', b'nonlfs2.txt', b'-R',
427 421 ... b'../server'])
428 422 ... runcommand(server, [b'config', b'extensions', b'--cwd',
429 423 ... b'../server'])
430 424 ...
431 425 ... bprint(b'\n# LFS enabled without requirement- revlogs have 0x2000 flag')
432 426 ... stdout.flush()
433 427 ... runcommand(server, [b'debugprocessors', b'nonlfs3.txt'])
434 428 ... runcommand(server, [b'config', b'extensions'])
435 429 ...
436 430 ... bprint(b"\n# LFS disabled locally- revlogs don't have 0x2000 flag")
437 431 ... stdout.flush()
438 432 ... runcommand(server, [b'debugprocessors', b'nonlfs.txt', b'-R',
439 433 ... b'../nonlfs'])
440 434 ... runcommand(server, [b'config', b'extensions', b'--cwd',
441 435 ... b'../nonlfs'])
442 436
443 437 # LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag
444 438 *** runcommand debugprocessors lfs.bin -R ../server
445 439 registered processor '0x8000'
446 440 registered processor '0x2000'
447 441 *** runcommand debugprocessors nonlfs2.txt -R ../server
448 442 registered processor '0x8000'
449 443 registered processor '0x2000'
450 444 *** runcommand config extensions --cwd ../server
451 445 extensions.debugprocessors=$TESTTMP/debugprocessors.py
452 446 extensions.lfs=
453 447
454 448 # LFS enabled without requirement- revlogs have 0x2000 flag
455 449 *** runcommand debugprocessors nonlfs3.txt
456 450 registered processor '0x8000'
457 451 registered processor '0x2000'
458 452 *** runcommand config extensions
459 453 extensions.debugprocessors=$TESTTMP/debugprocessors.py
460 454 extensions.lfs=
461 455
462 456 # LFS disabled locally- revlogs don't have 0x2000 flag
463 457 *** runcommand debugprocessors nonlfs.txt -R ../nonlfs
464 458 registered processor '0x8000'
465 459 *** runcommand config extensions --cwd ../nonlfs
466 460 extensions.debugprocessors=$TESTTMP/debugprocessors.py
467 461 extensions.lfs=!
468 462
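The debugprocessors runs above show which revlog flag bits have a processor attached: 0x8000 is always registered in these tests, while 0x2000 only shows up when the lfs extension is loaded, which is what lets lfs-flagged revisions be resolved through the blob store. The toy registry below is only meant to illustrate the idea of mapping flag bits to text transforms; it is not Mercurial's API and all names are made up.

    _processors = {}  # flag bit -> callable that resolves raw revision text

    def register(flag, read):
        _processors[flag] = read

    def resolve(rawtext, flags):
        text = rawtext
        for flag, read in sorted(_processors.items()):
            if flags & flag:
                text = read(text)
        return text

    # When lfs is enabled, conceptually something like this gets registered:
    # register(0x2000, lambda pointer: fetch_blob(parse_pointer(pointer)))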
469 463 --------------------------------------------------------------------------------
470 464 Case #6: client with lfs content and the extension enabled; server with
471 465 lfs content, and the extension enabled.
472 466
473 467 $ echo 'this is another lfs file' > lfs2.txt
474 468 $ hg ci -Aqm 'lfs file with lfs client'
475 469
476 470 $ hg --config paths.default= push -v http://localhost:$HGPORT
477 471 pushing to http://localhost:$HGPORT/
478 472 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
479 473 searching for changes
480 474 remote has heads on branch 'default' that are not known locally: 8374dc4052cb
481 475 lfs: uploading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
482 476 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
483 477 lfs: uploaded 1 files (25 bytes)
484 478 1 changesets found
485 479 uncompressed size of bundle content:
486 480 206 (changelog)
487 481 172 (manifests)
488 482 275 lfs2.txt
489 483 remote: adding changesets
490 484 remote: adding manifests
491 485 remote: adding file changes
492 486 remote: added 1 changesets with 1 changes to 1 files
493 487 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
494 488 .hg/requires:lfs
495 489 $TESTTMP/server/.hg/requires:lfs
496 490
497 491 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client6_clone
498 492 $ grep 'lfs' $TESTTMP/client6_clone/.hg/requires $SERVER_REQUIRES
499 493 $TESTTMP/client6_clone/.hg/requires:lfs
500 494 $TESTTMP/server/.hg/requires:lfs
501 495
502 496 $ hg init $TESTTMP/client6_pull
503 497 $ hg -R $TESTTMP/client6_pull pull -u -v http://localhost:$HGPORT
504 498 pulling from http://localhost:$HGPORT/
505 499 requesting all changes
506 500 adding changesets
507 501 adding manifests
508 502 adding file changes
509 503 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
510 504 added 6 changesets with 5 changes to 5 files (+1 heads)
511 505 new changesets d437e1d24fbd:d3b84d50eacb
512 506 resolving manifests
513 507 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
514 508 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
515 509 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
516 510 lfs: downloaded 1 files (25 bytes)
517 511 getting lfs2.txt
518 512 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
519 513 getting nonlfs2.txt
520 514 getting nonlfs3.txt
521 515 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
522 516 updated to "d3b84d50eacb: lfs file with lfs client"
523 517 1 other heads for branch "default"
524 518 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
525 519 $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
526 520 $TESTTMP/client6_pull/.hg/requires:lfs
527 521 $TESTTMP/server/.hg/requires:lfs
528 522
529 523 $ hg identify http://localhost:$HGPORT
530 524 d3b84d50eacb
531 525
532 526 --------------------------------------------------------------------------------
533 527 Misc: process dies early if a requirement exists and the extension is disabled
534 528
535 529 $ hg --config extensions.lfs=! summary
536 530 abort: repository requires features unknown to this Mercurial: lfs!
537 531 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
538 532 [255]
539 533
540 534 $ echo 'this is an lfs file' > $TESTTMP/client6_clone/lfspair1.bin
541 535 $ echo 'this is an lfs file too' > $TESTTMP/client6_clone/lfspair2.bin
542 536 $ hg -R $TESTTMP/client6_clone ci -Aqm 'add lfs pair'
543 537 $ hg -R $TESTTMP/client6_clone push -q
544 538
545 539 $ hg clone -qU http://localhost:$HGPORT $TESTTMP/bulkfetch
546 540
547 541 Cat doesn't prefetch unless data is needed (e.g. '-T {rawdata}' doesn't need it)
548 542
549 543 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T '{rawdata}\n{path}\n'
550 544 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
551 545 version https://git-lfs.github.com/spec/v1
552 546 oid sha256:cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
553 547 size 20
554 548 x-is-binary 0
555 549
556 550 lfspair1.bin
557 551
558 552 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T json
559 553 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
560 554 [lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
561 555 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
562 556 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
563 557 lfs: downloaded 1 files (20 bytes)
564 558 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
565 559
566 560 {
567 561 "data": "this is an lfs file\n",
568 562 "path": "lfspair1.bin",
569 563 "rawdata": "version https://git-lfs.github.com/spec/v1\noid sha256:cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782\nsize 20\nx-is-binary 0\n"
570 564 }
571 565 ]
572 566
573 567 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
574 568
575 569 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T '{data}\n'
576 570 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
577 571 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
578 572 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
579 573 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
580 574 lfs: downloaded 1 files (20 bytes)
581 575 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
582 576 this is an lfs file
583 577
584 578 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair2.bin
585 579 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
586 580 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
587 581 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
588 582 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
589 583 lfs: downloaded 1 files (24 bytes)
590 584 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
591 585 this is an lfs file too
592 586
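The '{rawdata}' output above is the git-lfs pointer text that actually lives in the revlog; only when real file contents are requested ('{data}' or a plain cat) does the blob get downloaded by its oid. A minimal parser for that pointer format, assuming only the fields shown above, could look like:

    def parse_pointer(rawdata):
        # rawdata is the pointer text printed by '-T {rawdata}' above, e.g.
        #   version https://git-lfs.github.com/spec/v1
        #   oid sha256:cf1b27...
        #   size 20
        fields = dict(line.split(b' ', 1) for line in rawdata.splitlines() if line)
        assert fields[b'version'] == b'https://git-lfs.github.com/spec/v1'
        algo, oid = fields[b'oid'].split(b':', 1)
        return algo, oid, int(fields[b'size'])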
593 587 Export will prefetch all needed files across all needed revisions
594 588
595 589 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
596 590 $ hg -R $TESTTMP/bulkfetch -v export -r 0:tip -o all.export
597 591 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
598 592 exporting patches:
599 593 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
600 594 lfs: need to transfer 4 objects (92 bytes)
601 595 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
602 596 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
603 597 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
604 598 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
605 599 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
606 600 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
607 601 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
608 602 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
609 603 lfs: downloaded 4 files (92 bytes)
610 604 all.export
611 605 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
612 606 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
613 607 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
614 608 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
615 609
616 610 Export with selected files is exercised by `extdiff --patch`
617 611
618 612 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
619 613 $ hg --config extensions.extdiff= \
620 614 > -R $TESTTMP/bulkfetch -v extdiff -r 2:tip --patch $TESTTMP/bulkfetch/lfs.bin
621 615 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
622 616 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
623 617 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
624 618 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
625 619 lfs: downloaded 1 files (23 bytes)
626 620 */hg-8374dc4052cb.patch (glob)
627 621 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
628 622 */hg-9640b57e77b1.patch (glob)
629 623 --- */hg-8374dc4052cb.patch * (glob)
630 624 +++ */hg-9640b57e77b1.patch * (glob)
631 625 @@ -2,12 +2,7 @@
632 626 # User test
633 627 # Date 0 0
634 628 # Thu Jan 01 00:00:00 1970 +0000
635 629 -# Node ID 8374dc4052cbd388e79d9dc4ddb29784097aa354
636 630 -# Parent 1477875038c60152e391238920a16381c627b487
637 631 -lfs
638 632 +# Node ID 9640b57e77b14c3a0144fb4478b6cc13e13ea0d1
639 633 +# Parent d3b84d50eacbd56638e11abce6b8616aaba54420
640 634 +add lfs pair
641 635
642 636 -diff -r 1477875038c6 -r 8374dc4052cb lfs.bin
643 637 ---- /dev/null Thu Jan 01 00:00:00 1970 +0000
644 638 -+++ b/lfs.bin Thu Jan 01 00:00:00 1970 +0000
645 639 -@@ -0,0 +1,1 @@
646 640 -+this is a big lfs file
647 641 cleaning up temp directory
648 642 [1]
649 643
650 644 Diff will prefetch files
651 645
652 646 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
653 647 $ hg -R $TESTTMP/bulkfetch -v diff -r 2:tip
654 648 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
655 649 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
656 650 lfs: need to transfer 4 objects (92 bytes)
657 651 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
658 652 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
659 653 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
660 654 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
661 655 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
662 656 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
663 657 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
664 658 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
665 659 lfs: downloaded 4 files (92 bytes)
666 660 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
667 661 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
668 662 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
669 663 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
670 664 diff -r 8374dc4052cb -r 9640b57e77b1 lfs.bin
671 665 --- a/lfs.bin Thu Jan 01 00:00:00 1970 +0000
672 666 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
673 667 @@ -1,1 +0,0 @@
674 668 -this is a big lfs file
675 669 diff -r 8374dc4052cb -r 9640b57e77b1 lfs2.txt
676 670 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
677 671 +++ b/lfs2.txt Thu Jan 01 00:00:00 1970 +0000
678 672 @@ -0,0 +1,1 @@
679 673 +this is another lfs file
680 674 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair1.bin
681 675 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
682 676 +++ b/lfspair1.bin Thu Jan 01 00:00:00 1970 +0000
683 677 @@ -0,0 +1,1 @@
684 678 +this is an lfs file
685 679 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair2.bin
686 680 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
687 681 +++ b/lfspair2.bin Thu Jan 01 00:00:00 1970 +0000
688 682 @@ -0,0 +1,1 @@
689 683 +this is an lfs file too
690 684 diff -r 8374dc4052cb -r 9640b57e77b1 nonlfs.txt
691 685 --- a/nonlfs.txt Thu Jan 01 00:00:00 1970 +0000
692 686 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
693 687 @@ -1,1 +0,0 @@
694 688 -non-lfs
695 689 diff -r 8374dc4052cb -r 9640b57e77b1 nonlfs3.txt
696 690 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
697 691 +++ b/nonlfs3.txt Thu Jan 01 00:00:00 1970 +0000
698 692 @@ -0,0 +1,1 @@
699 693 +non-lfs
700 694
701 695 Only the files required by diff are prefetched
702 696
703 697 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
704 698 $ hg -R $TESTTMP/bulkfetch -v diff -r 2:tip $TESTTMP/bulkfetch/lfspair2.bin
705 699 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
706 700 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
707 701 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
708 702 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
709 703 lfs: downloaded 1 files (24 bytes)
710 704 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
711 705 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair2.bin
712 706 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
713 707 +++ b/lfspair2.bin Thu Jan 01 00:00:00 1970 +0000
714 708 @@ -0,0 +1,1 @@
715 709 +this is an lfs file too
716 710
717 711 #endif
718 712
719 713 $ "$PYTHON" $TESTDIR/killdaemons.py $DAEMON_PIDS
720 714
721 715 $ cat $TESTTMP/errors.log
@@ -1,1493 +1,1379
1 1 #require no-reposimplestore
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > share =
6 6 > EOF
7 7
8 8 store and revlogv1 are required in source
9 9
10 10 $ hg --config format.usestore=false init no-store
11 11 $ hg -R no-store debugupgraderepo
12 12 abort: cannot upgrade repository; requirement missing: store
13 13 [255]
14 14
15 15 $ hg init no-revlogv1
16 16 $ cat > no-revlogv1/.hg/requires << EOF
17 17 > dotencode
18 18 > fncache
19 19 > generaldelta
20 20 > store
21 21 > EOF
22 22
23 23 $ hg -R no-revlogv1 debugupgraderepo
24 24 abort: cannot upgrade repository; requirement missing: revlogv1
25 25 [255]
26 26
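As the file written above shows, .hg/requires is simply one requirement name per line, and the upgrade refuses to start when an entry it depends on (here revlogv1) is absent. A minimal reader for that format, as a sketch:

    def readrequires(path):
        # .hg/requires holds one requirement name per line
        with open(path, 'rb') as fp:
            return {line.strip() for line in fp if line.strip()}

    # e.g. bail out when a needed entry is missing:
    # if b'revlogv1' not in readrequires('.hg/requires'):
    #     raise SystemExit('cannot upgrade repository; requirement missing: revlogv1')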
27 27 Cannot upgrade shared repositories
28 28
29 29 $ hg init share-parent
30 30 $ hg -q share share-parent share-child
31 31
32 32 $ hg -R share-child debugupgraderepo
33 33 abort: cannot upgrade repository; unsupported source requirement: shared
34 34 [255]
35 35
36 36 Do not yet support upgrading treemanifest repos
37 37
38 38 $ hg --config experimental.treemanifest=true init treemanifest
39 39 $ hg -R treemanifest debugupgraderepo
40 40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
41 41 [255]
42 42
43 43 Cannot add treemanifest requirement during upgrade
44 44
45 45 $ hg init disallowaddedreq
46 46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
47 47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
48 48 [255]
49 49
50 50 An upgrade of a repository created with recommended settings only suggests optimizations
51 51
52 52 $ hg init empty
53 53 $ cd empty
54 54 $ hg debugformat
55 55 format-variant repo
56 56 fncache: yes
57 57 dotencode: yes
58 58 generaldelta: yes
59 59 sparserevlog: yes
60 60 sidedata: no
61 61 copies-sdc: no
62 62 plain-cl-delta: yes
63 63 compression: zlib
64 64 compression-level: default
65 65 $ hg debugformat --verbose
66 66 format-variant repo config default
67 67 fncache: yes yes yes
68 68 dotencode: yes yes yes
69 69 generaldelta: yes yes yes
70 70 sparserevlog: yes yes yes
71 71 sidedata: no no no
72 72 copies-sdc: no no no
73 73 plain-cl-delta: yes yes yes
74 74 compression: zlib zlib zlib
75 75 compression-level: default default default
76 76 $ hg debugformat --verbose --config format.usefncache=no
77 77 format-variant repo config default
78 78 fncache: yes no yes
79 79 dotencode: yes no yes
80 80 generaldelta: yes yes yes
81 81 sparserevlog: yes yes yes
82 82 sidedata: no no no
83 83 copies-sdc: no no no
84 84 plain-cl-delta: yes yes yes
85 85 compression: zlib zlib zlib
86 86 compression-level: default default default
87 87 $ hg debugformat --verbose --config format.usefncache=no --color=debug
88 88 format-variant repo config default
89 89 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
90 90 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
91 91 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
92 92 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
93 93 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
94 94 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
95 95 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
96 96 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
97 97 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
98 98 $ hg debugformat -Tjson
99 99 [
100 100 {
101 101 "config": true,
102 102 "default": true,
103 103 "name": "fncache",
104 104 "repo": true
105 105 },
106 106 {
107 107 "config": true,
108 108 "default": true,
109 109 "name": "dotencode",
110 110 "repo": true
111 111 },
112 112 {
113 113 "config": true,
114 114 "default": true,
115 115 "name": "generaldelta",
116 116 "repo": true
117 117 },
118 118 {
119 119 "config": true,
120 120 "default": true,
121 121 "name": "sparserevlog",
122 122 "repo": true
123 123 },
124 124 {
125 125 "config": false,
126 126 "default": false,
127 127 "name": "sidedata",
128 128 "repo": false
129 129 },
130 130 {
131 131 "config": false,
132 132 "default": false,
133 133 "name": "copies-sdc",
134 134 "repo": false
135 135 },
136 136 {
137 137 "config": true,
138 138 "default": true,
139 139 "name": "plain-cl-delta",
140 140 "repo": true
141 141 },
142 142 {
143 143 "config": "zlib",
144 144 "default": "zlib",
145 145 "name": "compression",
146 146 "repo": "zlib"
147 147 },
148 148 {
149 149 "config": "default",
150 150 "default": "default",
151 151 "name": "compression-level",
152 152 "repo": "default"
153 153 }
154 154 ]
155 155 $ hg debugupgraderepo
156 156 (no feature deficiencies found in existing repository)
157 157 performing an upgrade with "--run" will make the following changes:
158 158
159 159 requirements
160 160 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
161 161
162 sidedata
163 Allows storage of extra data alongside a revision.
164
165 copies-sdc
166 Allows to use more efficient algorithm to deal with copy tracing.
167
168 162 additional optimizations are available by specifying "--optimize <name>":
169 163
170 164 re-delta-parent
171 165 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
172 166
173 167 re-delta-multibase
174 168 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
175 169
176 170 re-delta-all
177 171 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
178 172
179 173 re-delta-fulladd
180 174 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
181 175
182 176
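Note the lines dropped from the expected output just above: the sidedata and copies-sdc blurbs are no longer printed, presumably because those formats are not part of anything this particular upgrade would change. Expressed generically, with made-up names, the filtering looks like:

    def filteractions(actions, planned_requirements):
        # actions maps an action name to the requirement it concerns, or None for
        # pure optimizations (re-delta-*), which are always worth mentioning
        return {name: req for name, req in actions.items()
                if req is None or req in planned_requirements}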
183 177 --optimize can be used to add optimizations
184 178
185 179 $ hg debugupgrade --optimize redeltaparent
186 180 (no feature deficiencies found in existing repository)
187 181 performing an upgrade with "--run" will make the following changes:
188 182
189 183 requirements
190 184 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
191 185
192 sidedata
193 Allows storage of extra data alongside a revision.
194
195 copies-sdc
196 Allows to use more efficient algorithm to deal with copy tracing.
197
198 186 re-delta-parent
199 187 deltas within internal storage will choose a new base revision if needed
200 188
201 189 additional optimizations are available by specifying "--optimize <name>":
202 190
203 191 re-delta-multibase
204 192 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
205 193
206 194 re-delta-all
207 195 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
208 196
209 197 re-delta-fulladd
210 198 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
211 199
212 200
213 201 modern form of the option
214 202
215 203 $ hg debugupgrade --optimize re-delta-parent
216 204 (no feature deficiencies found in existing repository)
217 205 performing an upgrade with "--run" will make the following changes:
218 206
219 207 requirements
220 208 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
221 209
222 sidedata
223 Allows storage of extra data alongside a revision.
224
225 copies-sdc
226 Allows to use more efficient algorithm to deal with copy tracing.
227
228 210 re-delta-parent
229 211 deltas within internal storage will choose a new base revision if needed
230 212
231 213 additional optimizations are available by specifying "--optimize <name>":
232 214
233 215 re-delta-multibase
234 216 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
235 217
236 218 re-delta-all
237 219 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
238 220
239 221 re-delta-fulladd
240 222 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
241 223
242 224
243 225 unknown optimization:
244 226
245 227 $ hg debugupgrade --optimize foobar
246 228 abort: unknown optimization action requested: foobar
247 229 (run without arguments to see valid optimizations)
248 230 [255]
249 231
250 232 Various sub-optimal detections work
251 233
252 234 $ cat > .hg/requires << EOF
253 235 > revlogv1
254 236 > store
255 237 > EOF
256 238
257 239 $ hg debugformat
258 240 format-variant repo
259 241 fncache: no
260 242 dotencode: no
261 243 generaldelta: no
262 244 sparserevlog: no
263 245 sidedata: no
264 246 copies-sdc: no
265 247 plain-cl-delta: yes
266 248 compression: zlib
267 249 compression-level: default
268 250 $ hg debugformat --verbose
269 251 format-variant repo config default
270 252 fncache: no yes yes
271 253 dotencode: no yes yes
272 254 generaldelta: no yes yes
273 255 sparserevlog: no yes yes
274 256 sidedata: no no no
275 257 copies-sdc: no no no
276 258 plain-cl-delta: yes yes yes
277 259 compression: zlib zlib zlib
278 260 compression-level: default default default
279 261 $ hg debugformat --verbose --config format.usegeneraldelta=no
280 262 format-variant repo config default
281 263 fncache: no yes yes
282 264 dotencode: no yes yes
283 265 generaldelta: no no yes
284 266 sparserevlog: no no yes
285 267 sidedata: no no no
286 268 copies-sdc: no no no
287 269 plain-cl-delta: yes yes yes
288 270 compression: zlib zlib zlib
289 271 compression-level: default default default
290 272 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
291 273 format-variant repo config default
292 274 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
293 275 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
294 276 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
295 277 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
296 278 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
297 279 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
298 280 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
299 281 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
300 282 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
301 283 $ hg debugupgraderepo
302 284 repository lacks features recommended by current config options:
303 285
304 286 fncache
305 287 long and reserved filenames may not work correctly; repository performance is sub-optimal
306 288
307 289 dotencode
308 290 storage of filenames beginning with a period or space may not work correctly
309 291
310 292 generaldelta
311 293 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
312 294
313 295 sparserevlog
314 296 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
315 297
316 298
317 299 performing an upgrade with "--run" will make the following changes:
318 300
319 301 requirements
320 302 preserved: revlogv1, store
321 303 added: dotencode, fncache, generaldelta, sparserevlog
322 304
323 305 fncache
324 306 repository will be more resilient to storing certain paths and performance of certain operations should be improved
325 307
326 308 dotencode
327 309 repository will be better able to store files beginning with a space or period
328 310
329 311 generaldelta
330 312 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
331 313
332 314 sparserevlog
333 315 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
334 316
335 sidedata
336 Allows storage of extra data alongside a revision.
337
338 copies-sdc
339 Allows to use more efficient algorithm to deal with copy tracing.
340
341 317 additional optimizations are available by specifying "--optimize <name>":
342 318
343 319 re-delta-parent
344 320 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
345 321
346 322 re-delta-multibase
347 323 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
348 324
349 325 re-delta-all
350 326 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
351 327
352 328 re-delta-fulladd
353 329 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
354 330
355 331
356 332 $ hg --config format.dotencode=false debugupgraderepo
357 333 repository lacks features recommended by current config options:
358 334
359 335 fncache
360 336 long and reserved filenames may not work correctly; repository performance is sub-optimal
361 337
362 338 generaldelta
363 339 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
364 340
365 341 sparserevlog
366 342 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
367 343
368 344 repository lacks features used by the default config options:
369 345
370 346 dotencode
371 347 storage of filenames beginning with a period or space may not work correctly
372 348
373 349
374 350 performing an upgrade with "--run" will make the following changes:
375 351
376 352 requirements
377 353 preserved: revlogv1, store
378 354 added: fncache, generaldelta, sparserevlog
379 355
380 356 fncache
381 357 repository will be more resilient to storing certain paths and performance of certain operations should be improved
382 358
383 359 generaldelta
384 360 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
385 361
386 362 sparserevlog
387 363 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
388 364
389 sidedata
390 Allows storage of extra data alongside a revision.
391
392 copies-sdc
393 Allows to use more efficient algorithm to deal with copy tracing.
394
395 365 additional optimizations are available by specifying "--optimize <name>":
396 366
397 367 re-delta-parent
398 368 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
399 369
400 370 re-delta-multibase
401 371 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
402 372
403 373 re-delta-all
404 374 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
405 375
406 376 re-delta-fulladd
407 377 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
408 378
409 379
410 380 $ cd ..
411 381
412 382 Upgrading a repository that is already modern essentially no-ops
413 383
414 384 $ hg init modern
415 385 $ hg -R modern debugupgraderepo --run
416 386 upgrade will perform the following actions:
417 387
418 388 requirements
419 389 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
420 390
421 sidedata
422 Allows storage of extra data alongside a revision.
423
424 copies-sdc
425 Allows to use more efficient algorithm to deal with copy tracing.
426
427 391 beginning upgrade...
428 392 repository locked and read-only
429 393 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
430 394 (it is safe to interrupt this process any time before data migration completes)
431 395 data fully migrated to temporary repository
432 396 marking source repository as being upgraded; clients will be unable to read from repository
433 397 starting in-place swap of repository data
434 398 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
435 399 replacing store...
436 400 store replacement complete; repository was inconsistent for *s (glob)
437 401 finalizing requirements file and making repository readable again
438 402 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
439 403 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
440 404 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
441 405
442 406 Upgrading a repository to generaldelta works
443 407
444 408 $ hg --config format.usegeneraldelta=false init upgradegd
445 409 $ cd upgradegd
446 410 $ touch f0
447 411 $ hg -q commit -A -m initial
448 412 $ mkdir FooBarDirectory.d
449 413 $ touch FooBarDirectory.d/f1
450 414 $ hg -q commit -A -m 'add f1'
451 415 $ hg -q up -r 0
452 416 >>> from __future__ import absolute_import, print_function
453 417 >>> import random
454 418 >>> random.seed(0) # have a reproducible content
455 419 >>> with open("f2", "wb") as f:
456 420 ... for i in range(100000):
457 421 ... f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
458 422 $ hg -q commit -A -m 'add f2'
459 423
460 424 make sure we have a .d file
461 425
462 426 $ ls -d .hg/store/data/*
463 427 .hg/store/data/_foo_bar_directory.d.hg
464 428 .hg/store/data/f0.i
465 429 .hg/store/data/f2.d
466 430 .hg/store/data/f2.i
467 431
468 432 $ hg debugupgraderepo --run --config format.sparse-revlog=false
469 433 upgrade will perform the following actions:
470 434
471 435 requirements
472 436 preserved: dotencode, fncache, revlogv1, store
473 437 added: generaldelta
474 438
475 439 generaldelta
476 440 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
477 441
478 sidedata
479 Allows storage of extra data alongside a revision.
480
481 copies-sdc
482 Allows to use more efficient algorithm to deal with copy tracing.
483
484 442 beginning upgrade...
485 443 repository locked and read-only
486 444 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
487 445 (it is safe to interrupt this process any time before data migration completes)
488 446 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
489 447 migrating 519 KB in store; 1.05 MB tracked data
490 448 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
491 449 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
492 450 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
493 451 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
494 452 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
495 453 finished migrating 3 changelog revisions; change in size: 0 bytes
496 454 finished migrating 9 total revisions; total change in store size: -17 bytes
497 455 copying phaseroots
498 456 data fully migrated to temporary repository
499 457 marking source repository as being upgraded; clients will be unable to read from repository
500 458 starting in-place swap of repository data
501 459 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
502 460 replacing store...
503 461 store replacement complete; repository was inconsistent for *s (glob)
504 462 finalizing requirements file and making repository readable again
505 463 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
506 464 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
507 465 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
508 466
509 467 Original requirements backed up
510 468
511 469 $ cat .hg/upgradebackup.*/requires
512 470 dotencode
513 471 fncache
514 472 revlogv1
515 473 store
516 474
517 475 generaldelta added to the original requirements file
518 476
519 477 $ cat .hg/requires
520 478 dotencode
521 479 fncache
522 480 generaldelta
523 481 revlogv1
524 482 store
525 483
526 484 store directory has files we expect
527 485
528 486 $ ls .hg/store
529 487 00changelog.i
530 488 00manifest.i
531 489 data
532 490 fncache
533 491 phaseroots
534 492 undo
535 493 undo.backupfiles
536 494 undo.phaseroots
537 495
538 496 manifest should be generaldelta
539 497
540 498 $ hg debugrevlog -m | grep flags
541 499 flags : inline, generaldelta
542 500
543 501 verify should be happy
544 502
545 503 $ hg verify
546 504 checking changesets
547 505 checking manifests
548 506 crosschecking files in changesets and manifests
549 507 checking files
550 508 checked 3 changesets with 3 changes to 3 files
551 509
552 510 old store should be backed up
553 511
554 512 $ ls -d .hg/upgradebackup.*/
555 513 .hg/upgradebackup.*/ (glob)
556 514 $ ls .hg/upgradebackup.*/store
557 515 00changelog.i
558 516 00manifest.i
559 517 data
560 518 fncache
561 519 phaseroots
562 520 undo
563 521 undo.backup.fncache
564 522 undo.backupfiles
565 523 undo.phaseroots
566 524
567 525 unless --no-backup is passed
568 526
569 527 $ rm -rf .hg/upgradebackup.*/
570 528 $ hg debugupgraderepo --run --no-backup
571 529 upgrade will perform the following actions:
572 530
573 531 requirements
574 532 preserved: dotencode, fncache, generaldelta, revlogv1, store
575 533 added: sparserevlog
576 534
577 535 sparserevlog
578 536 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
579 537
580 sidedata
581 Allows storage of extra data alongside a revision.
582
583 copies-sdc
584 Allows to use more efficient algorithm to deal with copy tracing.
585
586 538 beginning upgrade...
587 539 repository locked and read-only
588 540 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
589 541 (it is safe to interrupt this process any time before data migration completes)
590 542 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
591 543 migrating 519 KB in store; 1.05 MB tracked data
592 544 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
593 545 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
594 546 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
595 547 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
596 548 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
597 549 finished migrating 3 changelog revisions; change in size: 0 bytes
598 550 finished migrating 9 total revisions; total change in store size: 0 bytes
599 551 copying phaseroots
600 552 data fully migrated to temporary repository
601 553 marking source repository as being upgraded; clients will be unable to read from repository
602 554 starting in-place swap of repository data
603 555 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
604 556 replacing store...
605 557 store replacement complete; repository was inconsistent for * (glob)
606 558 finalizing requirements file and making repository readable again
607 559 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
608 560 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
609 561 $ ls -1 .hg/ | grep upgradebackup
610 562 [1]
611 563
612 564 We can restrict optimization to a subset of revlogs (see the sketch after the output below):
613 565
614 566 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
615 567 upgrade will perform the following actions:
616 568
617 569 requirements
618 570 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
619 571
620 sidedata
621 Allows storage of extra data alongside a revision.
622
623 copies-sdc
624 Allows to use more efficient algorithm to deal with copy tracing.
625
626 572 re-delta-parent
627 573 deltas within internal storage will choose a new base revision if needed
628 574
629 575 beginning upgrade...
630 576 repository locked and read-only
631 577 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
632 578 (it is safe to interrupt this process any time before data migration completes)
633 579 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
634 580 migrating 519 KB in store; 1.05 MB tracked data
635 581 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
636 582 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
637 583 blindly copying data/f0.i containing 1 revisions
638 584 blindly copying data/f2.i containing 1 revisions
639 585 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
640 586 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
641 587 cloning 3 revisions from 00manifest.i
642 588 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
643 589 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
644 590 blindly copying 00changelog.i containing 3 revisions
645 591 finished migrating 3 changelog revisions; change in size: 0 bytes
646 592 finished migrating 9 total revisions; total change in store size: 0 bytes
647 593 copying phaseroots
648 594 data fully migrated to temporary repository
649 595 marking source repository as being upgraded; clients will be unable to read from repository
650 596 starting in-place swap of repository data
651 597 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
652 598 replacing store...
653 599 store replacement complete; repository was inconsistent for *s (glob)
654 600 finalizing requirements file and making repository readable again
655 601 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
656 602 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
657 603
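In the run above, --manifest means only 00manifest.i gets re-deltaed ('cloning 3 revisions from 00manifest.i') while the changelog and filelogs are blindly copied. A selection predicate of roughly this shape (names assumed, not the actual upgrade code) captures that behaviour:

    def revlog_selected(selection, store_path):
        # selection is e.g. {'manifest'} for --manifest,
        # or {'changelog', 'filelogs'} for --no-manifest
        if store_path == b'00changelog.i':
            return 'changelog' in selection
        if store_path == b'00manifest.i':
            return 'manifest' in selection
        return 'filelogs' in selection  # everything under data/ is a filelog

Revlogs for which this returns False are copied as-is instead of being re-deltaed.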
658 604 Check that the repo still works fine
659 605
660 606 $ hg log -G --stat
661 607 @ changeset: 2:76d4395f5413 (no-py3 !)
662 608 @ changeset: 2:fca376863211 (py3 !)
663 609 | tag: tip
664 610 | parent: 0:ba592bf28da2
665 611 | user: test
666 612 | date: Thu Jan 01 00:00:00 1970 +0000
667 613 | summary: add f2
668 614 |
669 615 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
670 616 | 1 files changed, 100000 insertions(+), 0 deletions(-)
671 617 |
672 618 | o changeset: 1:2029ce2354e2
673 619 |/ user: test
674 620 | date: Thu Jan 01 00:00:00 1970 +0000
675 621 | summary: add f1
676 622 |
677 623 |
678 624 o changeset: 0:ba592bf28da2
679 625 user: test
680 626 date: Thu Jan 01 00:00:00 1970 +0000
681 627 summary: initial
682 628
683 629
684 630
685 631 $ hg verify
686 632 checking changesets
687 633 checking manifests
688 634 crosschecking files in changesets and manifests
689 635 checking files
690 636 checked 3 changesets with 3 changes to 3 files
691 637
692 638 Check that we can select negatively
693 639
694 640 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
695 641 upgrade will perform the following actions:
696 642
697 643 requirements
698 644 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
699 645
700 sidedata
701 Allows storage of extra data alongside a revision.
702
703 copies-sdc
704 Allows to use more efficient algorithm to deal with copy tracing.
705
706 646 re-delta-parent
707 647 deltas within internal storage will choose a new base revision if needed
708 648
709 649 beginning upgrade...
710 650 repository locked and read-only
711 651 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
712 652 (it is safe to interrupt this process any time before data migration completes)
713 653 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
714 654 migrating 519 KB in store; 1.05 MB tracked data
715 655 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
716 656 cloning 1 revisions from data/FooBarDirectory.d/f1.i
717 657 cloning 1 revisions from data/f0.i
718 658 cloning 1 revisions from data/f2.i
719 659 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
720 660 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
721 661 blindly copying 00manifest.i containing 3 revisions
722 662 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
723 663 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
724 664 cloning 3 revisions from 00changelog.i
725 665 finished migrating 3 changelog revisions; change in size: 0 bytes
726 666 finished migrating 9 total revisions; total change in store size: 0 bytes
727 667 copying phaseroots
728 668 data fully migrated to temporary repository
729 669 marking source repository as being upgraded; clients will be unable to read from repository
730 670 starting in-place swap of repository data
731 671 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
732 672 replacing store...
733 673 store replacement complete; repository was inconsistent for *s (glob)
734 674 finalizing requirements file and making repository readable again
735 675 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
736 676 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
737 677 $ hg verify
738 678 checking changesets
739 679 checking manifests
740 680 crosschecking files in changesets and manifests
741 681 checking files
742 682 checked 3 changesets with 3 changes to 3 files
743 683
744 684 Check that we can select changelog only
745 685
746 686 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
747 687 upgrade will perform the following actions:
748 688
749 689 requirements
750 690 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
751 691
752 sidedata
753 Allows storage of extra data alongside a revision.
754
755 copies-sdc
756 Allows to use more efficient algorithm to deal with copy tracing.
757
758 692 re-delta-parent
759 693 deltas within internal storage will choose a new base revision if needed
760 694
761 695 beginning upgrade...
762 696 repository locked and read-only
763 697 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
764 698 (it is safe to interrupt this process any time before data migration completes)
765 699 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
766 700 migrating 519 KB in store; 1.05 MB tracked data
767 701 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
768 702 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
769 703 blindly copying data/f0.i containing 1 revisions
770 704 blindly copying data/f2.i containing 1 revisions
771 705 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
772 706 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
773 707 blindly copying 00manifest.i containing 3 revisions
774 708 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
775 709 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
776 710 cloning 3 revisions from 00changelog.i
777 711 finished migrating 3 changelog revisions; change in size: 0 bytes
778 712 finished migrating 9 total revisions; total change in store size: 0 bytes
779 713 copying phaseroots
780 714 data fully migrated to temporary repository
781 715 marking source repository as being upgraded; clients will be unable to read from repository
782 716 starting in-place swap of repository data
783 717 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
784 718 replacing store...
785 719 store replacement complete; repository was inconsistent for *s (glob)
786 720 finalizing requirements file and making repository readable again
787 721 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
788 722 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
789 723 $ hg verify
790 724 checking changesets
791 725 checking manifests
792 726 crosschecking files in changesets and manifests
793 727 checking files
794 728 checked 3 changesets with 3 changes to 3 files
795 729
796 730 Check that we can select filelog only
797 731
798 732 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
799 733 upgrade will perform the following actions:
800 734
801 735 requirements
802 736 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
803 737
804 sidedata
805 Allows storage of extra data alongside a revision.
806
807 copies-sdc
808 Allows to use more efficient algorithm to deal with copy tracing.
809
810 738 re-delta-parent
811 739 deltas within internal storage will choose a new base revision if needed
812 740
813 741 beginning upgrade...
814 742 repository locked and read-only
815 743 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
816 744 (it is safe to interrupt this process any time before data migration completes)
817 745 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
818 746 migrating 519 KB in store; 1.05 MB tracked data
819 747 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
820 748 cloning 1 revisions from data/FooBarDirectory.d/f1.i
821 749 cloning 1 revisions from data/f0.i
822 750 cloning 1 revisions from data/f2.i
823 751 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
824 752 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
825 753 blindly copying 00manifest.i containing 3 revisions
826 754 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
827 755 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
828 756 blindly copying 00changelog.i containing 3 revisions
829 757 finished migrating 3 changelog revisions; change in size: 0 bytes
830 758 finished migrating 9 total revisions; total change in store size: 0 bytes
831 759 copying phaseroots
832 760 data fully migrated to temporary repository
833 761 marking source repository as being upgraded; clients will be unable to read from repository
834 762 starting in-place swap of repository data
835 763 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
836 764 replacing store...
837 765 store replacement complete; repository was inconsistent for *s (glob)
838 766 finalizing requirements file and making repository readable again
839 767 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
840 768 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
841 769 $ hg verify
842 770 checking changesets
843 771 checking manifests
844 772 crosschecking files in changesets and manifests
845 773 checking files
846 774 checked 3 changesets with 3 changes to 3 files
847 775
848 776
849 777 Check you can't skip revlog clone during important format downgrade
850 778
851 779 $ echo "[format]" > .hg/hgrc
852 780 $ echo "sparse-revlog=no" >> .hg/hgrc
853 781 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
854 782 ignoring revlogs selection flags, format requirements change: sparserevlog
855 783 upgrade will perform the following actions:
856 784
857 785 requirements
858 786 preserved: dotencode, fncache, generaldelta, revlogv1, store
859 787 removed: sparserevlog
860 788
861 sidedata
862 Allows storage of extra data alongside a revision.
863
864 copies-sdc
865 Allows to use more efficient algorithm to deal with copy tracing.
866
867 789 re-delta-parent
868 790 deltas within internal storage will choose a new base revision if needed
869 791
870 792 beginning upgrade...
871 793 repository locked and read-only
872 794 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
873 795 (it is safe to interrupt this process any time before data migration completes)
874 796 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
875 797 migrating 519 KB in store; 1.05 MB tracked data
876 798 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
877 799 cloning 1 revisions from data/FooBarDirectory.d/f1.i
878 800 cloning 1 revisions from data/f0.i
879 801 cloning 1 revisions from data/f2.i
880 802 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
881 803 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
882 804 cloning 3 revisions from 00manifest.i
883 805 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
884 806 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
885 807 cloning 3 revisions from 00changelog.i
886 808 finished migrating 3 changelog revisions; change in size: 0 bytes
887 809 finished migrating 9 total revisions; total change in store size: 0 bytes
888 810 copying phaseroots
889 811 data fully migrated to temporary repository
890 812 marking source repository as being upgraded; clients will be unable to read from repository
891 813 starting in-place swap of repository data
892 814 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
893 815 replacing store...
894 816 store replacement complete; repository was inconsistent for *s (glob)
895 817 finalizing requirements file and making repository readable again
896 818 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
897 819 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
898 820 $ hg verify
899 821 checking changesets
900 822 checking manifests
901 823 crosschecking files in changesets and manifests
902 824 checking files
903 825 checked 3 changesets with 3 changes to 3 files
904 826
905 827 Check you can't skip revlog clone during important format upgrade
906 828
907 829 $ echo "sparse-revlog=yes" >> .hg/hgrc
908 830 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
909 831 ignoring revlogs selection flags, format requirements change: sparserevlog
910 832 upgrade will perform the following actions:
911 833
912 834 requirements
913 835 preserved: dotencode, fncache, generaldelta, revlogv1, store
914 836 added: sparserevlog
915 837
916 838 sparserevlog
917 839 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
918 840
919 sidedata
920 Allows storage of extra data alongside a revision.
921
922 copies-sdc
923 Allows to use more efficient algorithm to deal with copy tracing.
924
925 841 re-delta-parent
926 842 deltas within internal storage will choose a new base revision if needed
927 843
928 844 beginning upgrade...
929 845 repository locked and read-only
930 846 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
931 847 (it is safe to interrupt this process any time before data migration completes)
932 848 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
933 849 migrating 519 KB in store; 1.05 MB tracked data
934 850 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
935 851 cloning 1 revisions from data/FooBarDirectory.d/f1.i
936 852 cloning 1 revisions from data/f0.i
937 853 cloning 1 revisions from data/f2.i
938 854 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
939 855 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
940 856 cloning 3 revisions from 00manifest.i
941 857 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
942 858 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
943 859 cloning 3 revisions from 00changelog.i
944 860 finished migrating 3 changelog revisions; change in size: 0 bytes
945 861 finished migrating 9 total revisions; total change in store size: 0 bytes
946 862 copying phaseroots
947 863 data fully migrated to temporary repository
948 864 marking source repository as being upgraded; clients will be unable to read from repository
949 865 starting in-place swap of repository data
950 866 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
951 867 replacing store...
952 868 store replacement complete; repository was inconsistent for *s (glob)
953 869 finalizing requirements file and making repository readable again
954 870 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
955 871 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
956 872 $ hg verify
957 873 checking changesets
958 874 checking manifests
959 875 crosschecking files in changesets and manifests
960 876 checking files
961 877 checked 3 changesets with 3 changes to 3 files
962 878
963 879 $ cd ..
964 880
965 881 store files with special filenames aren't encoded during copy
966 882
967 883 $ hg init store-filenames
968 884 $ cd store-filenames
969 885 $ touch foo
970 886 $ hg -q commit -A -m initial
971 887 $ touch .hg/store/.XX_special_filename
972 888
973 889 $ hg debugupgraderepo --run
974 890 upgrade will perform the following actions:
975 891
976 892 requirements
977 893 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
978 894
979 sidedata
980 Allows storage of extra data alongside a revision.
981
982 copies-sdc
983 Allows to use more efficient algorithm to deal with copy tracing.
984
985 895 beginning upgrade...
986 896 repository locked and read-only
987 897 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
988 898 (it is safe to interrupt this process any time before data migration completes)
989 899 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
990 900 migrating 301 bytes in store; 107 bytes tracked data
991 901 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
992 902 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
993 903 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
994 904 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
995 905 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
996 906 finished migrating 1 changelog revisions; change in size: 0 bytes
997 907 finished migrating 3 total revisions; total change in store size: 0 bytes
998 908 copying .XX_special_filename
999 909 copying phaseroots
1000 910 data fully migrated to temporary repository
1001 911 marking source repository as being upgraded; clients will be unable to read from repository
1002 912 starting in-place swap of repository data
1003 913 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1004 914 replacing store...
1005 915 store replacement complete; repository was inconsistent for *s (glob)
1006 916 finalizing requirements file and making repository readable again
1007 917 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1008 918 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1009 919 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1010 920 $ hg debugupgraderepo --run --optimize redeltafulladd
1011 921 upgrade will perform the following actions:
1012 922
1013 923 requirements
1014 924 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1015 925
1016 sidedata
1017 Allows storage of extra data alongside a revision.
1018
1019 copies-sdc
1020 Allows to use more efficient algorithm to deal with copy tracing.
1021
1022 926 re-delta-fulladd
1023 927 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1024 928
1025 929 beginning upgrade...
1026 930 repository locked and read-only
1027 931 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1028 932 (it is safe to interrupt this process any time before data migration completes)
1029 933 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1030 934 migrating 301 bytes in store; 107 bytes tracked data
1031 935 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1032 936 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1033 937 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1034 938 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1035 939 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1036 940 finished migrating 1 changelog revisions; change in size: 0 bytes
1037 941 finished migrating 3 total revisions; total change in store size: 0 bytes
1038 942 copying .XX_special_filename
1039 943 copying phaseroots
1040 944 data fully migrated to temporary repository
1041 945 marking source repository as being upgraded; clients will be unable to read from repository
1042 946 starting in-place swap of repository data
1043 947 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1044 948 replacing store...
1045 949 store replacement complete; repository was inconsistent for *s (glob)
1046 950 finalizing requirements file and making repository readable again
1047 951 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1048 952 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1049 953 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1050 954
1051 955 fncache is valid after upgrade
1052 956
1053 957 $ hg debugrebuildfncache
1054 958 fncache already up to date
1055 959
1056 960 $ cd ..
1057 961
1058 962 Check upgrading a large file repository
1059 963 ---------------------------------------
1060 964
1061 965 $ hg init largefilesrepo
1062 966 $ cat << EOF >> largefilesrepo/.hg/hgrc
1063 967 > [extensions]
1064 968 > largefiles =
1065 969 > EOF
1066 970
1067 971 $ cd largefilesrepo
1068 972 $ touch foo
1069 973 $ hg add --large foo
1070 974 $ hg -q commit -m initial
1071 975 $ cat .hg/requires
1072 976 dotencode
1073 977 fncache
1074 978 generaldelta
1075 979 largefiles
1076 980 revlogv1
1077 981 sparserevlog
1078 982 store
1079 983
1080 984 $ hg debugupgraderepo --run
1081 985 upgrade will perform the following actions:
1082 986
1083 987 requirements
1084 988 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
1085 989
1086 sidedata
1087 Allows storage of extra data alongside a revision.
1088
1089 copies-sdc
1090 Allows to use more efficient algorithm to deal with copy tracing.
1091
1092 990 beginning upgrade...
1093 991 repository locked and read-only
1094 992 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1095 993 (it is safe to interrupt this process any time before data migration completes)
1096 994 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1097 995 migrating 355 bytes in store; 160 bytes tracked data
1098 996 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
1099 997 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1100 998 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
1101 999 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1102 1000 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
1103 1001 finished migrating 1 changelog revisions; change in size: 0 bytes
1104 1002 finished migrating 3 total revisions; total change in store size: 0 bytes
1105 1003 copying phaseroots
1106 1004 data fully migrated to temporary repository
1107 1005 marking source repository as being upgraded; clients will be unable to read from repository
1108 1006 starting in-place swap of repository data
1109 1007 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1110 1008 replacing store...
1111 1009 store replacement complete; repository was inconsistent for *s (glob)
1112 1010 finalizing requirements file and making repository readable again
1113 1011 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1114 1012 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1115 1013 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1116 1014 $ cat .hg/requires
1117 1015 dotencode
1118 1016 fncache
1119 1017 generaldelta
1120 1018 largefiles
1121 1019 revlogv1
1122 1020 sparserevlog
1123 1021 store
1124 1022
1125 1023 $ cat << EOF >> .hg/hgrc
1126 1024 > [extensions]
1127 1025 > lfs =
1128 1026 > [lfs]
1129 1027 > threshold = 10
1130 1028 > EOF
1131 1029 $ echo '123456789012345' > lfs.bin
1132 1030 $ hg ci -Am 'lfs.bin'
1133 1031 adding lfs.bin
1134 1032 $ grep lfs .hg/requires
1135 1033 lfs
1136 1034 $ find .hg/store/lfs -type f
1137 1035 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1138 1036
1139 1037 $ hg debugupgraderepo --run
1140 1038 upgrade will perform the following actions:
1141 1039
1142 1040 requirements
1143 1041 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
1144 1042
1145 sidedata
1146 Allows storage of extra data alongside a revision.
1147
1148 copies-sdc
1149 Allows to use more efficient algorithm to deal with copy tracing.
1150
1151 1043 beginning upgrade...
1152 1044 repository locked and read-only
1153 1045 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1154 1046 (it is safe to interrupt this process any time before data migration completes)
1155 1047 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
1156 1048 migrating 801 bytes in store; 467 bytes tracked data
1157 1049 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
1158 1050 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
1159 1051 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
1160 1052 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
1161 1053 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
1162 1054 finished migrating 2 changelog revisions; change in size: 0 bytes
1163 1055 finished migrating 6 total revisions; total change in store size: 0 bytes
1164 1056 copying phaseroots
1165 1057 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1166 1058 data fully migrated to temporary repository
1167 1059 marking source repository as being upgraded; clients will be unable to read from repository
1168 1060 starting in-place swap of repository data
1169 1061 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1170 1062 replacing store...
1171 1063 store replacement complete; repository was inconsistent for *s (glob)
1172 1064 finalizing requirements file and making repository readable again
1173 1065 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1174 1066 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1175 1067 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1176 1068
1177 1069 $ grep lfs .hg/requires
1178 1070 lfs
1179 1071 $ find .hg/store/lfs -type f
1180 1072 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1181 1073 $ hg verify
1182 1074 checking changesets
1183 1075 checking manifests
1184 1076 crosschecking files in changesets and manifests
1185 1077 checking files
1186 1078 checked 2 changesets with 2 changes to 2 files
1187 1079 $ hg debugdata lfs.bin 0
1188 1080 version https://git-lfs.github.com/spec/v1
1189 1081 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1190 1082 size 16
1191 1083 x-is-binary 0
1192 1084
1193 1085 $ cd ..
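The pointer printed by `hg debugdata lfs.bin 0` above stores only metadata about lfs.bin. Assuming the usual Git LFS convention that the oid is the SHA-256 of the raw file content, the value can be reproduced from the 16 bytes committed earlier; a minimal Python sketch:

    import hashlib
    # the data written above: '123456789012345' plus the trailing newline
    content = b"123456789012345\n"
    assert len(content) == 16                    # matches "size 16" in the pointer
    print(hashlib.sha256(content).hexdigest())   # expected to match the oid above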
1194 1086
1195 1087 repository config is taken into account
1196 1088 ---------------------------------------
1197 1089
1198 1090 $ cat << EOF >> $HGRCPATH
1199 1091 > [format]
1200 1092 > maxchainlen = 1
1201 1093 > EOF
1202 1094
1203 1095 $ hg init localconfig
1204 1096 $ cd localconfig
1205 1097 $ cat << EOF > file
1206 1098 > some content
1207 1099 > with some length
1208 1100 > to make sure we get a delta
1209 1101 > after changes
1210 1102 > very long
1211 1103 > very long
1212 1104 > very long
1213 1105 > very long
1214 1106 > very long
1215 1107 > very long
1216 1108 > very long
1217 1109 > very long
1218 1110 > very long
1219 1111 > very long
1220 1112 > very long
1221 1113 > EOF
1222 1114 $ hg -q commit -A -m A
1223 1115 $ echo "new line" >> file
1224 1116 $ hg -q commit -m B
1225 1117 $ echo "new line" >> file
1226 1118 $ hg -q commit -m C
1227 1119
1228 1120 $ cat << EOF >> .hg/hgrc
1229 1121 > [format]
1230 1122 > maxchainlen = 9001
1231 1123 > EOF
1232 1124 $ hg config format
1233 1125 format.maxchainlen=9001
1234 1126 $ hg debugdeltachain file
1235 1127 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1236 1128 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1237 1129 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1238 1130 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1239 1131
1240 1132 $ hg debugupgraderepo --run --optimize redeltaall
1241 1133 upgrade will perform the following actions:
1242 1134
1243 1135 requirements
1244 1136 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1245 1137
1246 sidedata
1247 Allows storage of extra data alongside a revision.
1248
1249 copies-sdc
1250 Allows to use more efficient algorithm to deal with copy tracing.
1251
1252 1138 re-delta-all
1253 1139 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1254 1140
1255 1141 beginning upgrade...
1256 1142 repository locked and read-only
1257 1143 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1258 1144 (it is safe to interrupt this process any time before data migration completes)
1259 1145 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1260 1146 migrating 1019 bytes in store; 882 bytes tracked data
1261 1147 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1262 1148 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1263 1149 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1264 1150 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1265 1151 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1266 1152 finished migrating 3 changelog revisions; change in size: 0 bytes
1267 1153 finished migrating 9 total revisions; total change in store size: -9 bytes
1268 1154 copying phaseroots
1269 1155 data fully migrated to temporary repository
1270 1156 marking source repository as being upgraded; clients will be unable to read from repository
1271 1157 starting in-place swap of repository data
1272 1158 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1273 1159 replacing store...
1274 1160 store replacement complete; repository was inconsistent for *s (glob)
1275 1161 finalizing requirements file and making repository readable again
1276 1162 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1277 1163 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1278 1164 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1279 1165 $ hg debugdeltachain file
1280 1166 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1281 1167 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1282 1168 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1283 1169 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1284 1170 $ cd ..
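The two `hg debugdeltachain file` listings above can be cross-checked by hand: chainsize is the running sum of the per-revision delta sizes along the chain, and ratio is chainsize divided by rawsize. A short Python sketch of that arithmetic for rev 2 after re-delta-all (numbers copied from the last table):

    # "size" column for revs 0, 1 and 2 after the re-delta-all pass
    sizes = [77, 21, 21]
    chainsize = sum(sizes)                 # 119, the "chainsize" column for rev 2
    rawsize = 200                          # the "rawsize" column for rev 2
    print(chainsize, chainsize / rawsize)  # 119 0.595 -> the 0.59500 "ratio" column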
1285 1171
1286 1172 $ cat << EOF >> $HGRCPATH
1287 1173 > [format]
1288 1174 > maxchainlen = 9001
1289 1175 > EOF
1290 1176
1291 1177 Check upgrading a sparse-revlog repository
1292 1178 ---------------------------------------
1293 1179
1294 1180 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1295 1181 $ cd sparserevlogrepo
1296 1182 $ touch foo
1297 1183 $ hg add foo
1298 1184 $ hg -q commit -m "foo"
1299 1185 $ cat .hg/requires
1300 1186 dotencode
1301 1187 fncache
1302 1188 generaldelta
1303 1189 revlogv1
1304 1190 store
1305 1191
1306 1192 Check that we can add the sparse-revlog format requirement
1307 1193 $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
1308 1194 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1309 1195 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1310 1196 $ cat .hg/requires
1311 1197 dotencode
1312 1198 fncache
1313 1199 generaldelta
1314 1200 revlogv1
1315 1201 sparserevlog
1316 1202 store
1317 1203
1318 1204 Check that we can remove the sparse-revlog format requirement
1319 1205 $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
1320 1206 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1321 1207 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1322 1208 $ cat .hg/requires
1323 1209 dotencode
1324 1210 fncache
1325 1211 generaldelta
1326 1212 revlogv1
1327 1213 store
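Both toggles above only rewrite the repository's requirements; the `cat .hg/requires` listings show one entry per line. A minimal Python sketch of how a script might test for the flag, assuming that layout (has_requirement is a hypothetical helper, not part of Mercurial):

    def has_requirement(repo_root, name=b"sparserevlog"):
        # .hg/requires lists one requirement per line, as shown above
        with open(repo_root + "/.hg/requires", "rb") as fp:
            return name in fp.read().splitlines()

    # True after the sparse-revlog upgrade, False again after the downgrade
    print(has_requirement("sparserevlogrepo"))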
1328 1214
1329 1215 #if zstd
1330 1216
1331 1217 Check upgrading to a zstd revlog
1332 1218 --------------------------------
1333 1219
1334 1220 upgrade
1335 1221
1336 1222 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup >/dev/null
1337 1223 $ hg debugformat -v
1338 1224 format-variant repo config default
1339 1225 fncache: yes yes yes
1340 1226 dotencode: yes yes yes
1341 1227 generaldelta: yes yes yes
1342 1228 sparserevlog: yes yes yes
1343 1229 sidedata: no no no
1344 1230 copies-sdc: no no no
1345 1231 plain-cl-delta: yes yes yes
1346 1232 compression: zstd zlib zlib
1347 1233 compression-level: default default default
1348 1234 $ cat .hg/requires
1349 1235 dotencode
1350 1236 fncache
1351 1237 generaldelta
1352 1238 revlog-compression-zstd
1353 1239 revlogv1
1354 1240 sparserevlog
1355 1241 store
1356 1242
1357 1243 downgrade
1358 1244
1359 1245 $ hg debugupgraderepo --run --no-backup > /dev/null
1360 1246 $ hg debugformat -v
1361 1247 format-variant repo config default
1362 1248 fncache: yes yes yes
1363 1249 dotencode: yes yes yes
1364 1250 generaldelta: yes yes yes
1365 1251 sparserevlog: yes yes yes
1366 1252 sidedata: no no no
1367 1253 copies-sdc: no no no
1368 1254 plain-cl-delta: yes yes yes
1369 1255 compression: zlib zlib zlib
1370 1256 compression-level: default default default
1371 1257 $ cat .hg/requires
1372 1258 dotencode
1373 1259 fncache
1374 1260 generaldelta
1375 1261 revlogv1
1376 1262 sparserevlog
1377 1263 store
1378 1264
1379 1265 upgrade from hgrc
1380 1266
1381 1267 $ cat >> .hg/hgrc << EOF
1382 1268 > [format]
1383 1269 > revlog-compression=zstd
1384 1270 > EOF
1385 1271 $ hg debugupgraderepo --run --no-backup > /dev/null
1386 1272 $ hg debugformat -v
1387 1273 format-variant repo config default
1388 1274 fncache: yes yes yes
1389 1275 dotencode: yes yes yes
1390 1276 generaldelta: yes yes yes
1391 1277 sparserevlog: yes yes yes
1392 1278 sidedata: no no no
1393 1279 copies-sdc: no no no
1394 1280 plain-cl-delta: yes yes yes
1395 1281 compression: zstd zstd zlib
1396 1282 compression-level: default default default
1397 1283 $ cat .hg/requires
1398 1284 dotencode
1399 1285 fncache
1400 1286 generaldelta
1401 1287 revlog-compression-zstd
1402 1288 revlogv1
1403 1289 sparserevlog
1404 1290 store
1405 1291
1406 1292 #endif
1407 1293
1408 1294 Check upgrading to a side-data revlog
1409 1295 -------------------------------------
1410 1296
1411 1297 upgrade
1412 1298
1413 1299 $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" >/dev/null
1414 1300 $ hg debugformat -v
1415 1301 format-variant repo config default
1416 1302 fncache: yes yes yes
1417 1303 dotencode: yes yes yes
1418 1304 generaldelta: yes yes yes
1419 1305 sparserevlog: yes yes yes
1420 1306 sidedata: yes no no
1421 1307 copies-sdc: no no no
1422 1308 plain-cl-delta: yes yes yes
1423 1309 compression: zstd zstd zlib (zstd !)
1424 1310 compression: zlib zlib zlib (no-zstd !)
1425 1311 compression-level: default default default
1426 1312 $ cat .hg/requires
1427 1313 dotencode
1428 1314 exp-sidedata-flag
1429 1315 fncache
1430 1316 generaldelta
1431 1317 revlog-compression-zstd (zstd !)
1432 1318 revlogv1
1433 1319 sparserevlog
1434 1320 store
1435 1321 $ hg debugsidedata -c 0
1436 1322 2 sidedata entries
1437 1323 entry-0001 size 4
1438 1324 entry-0002 size 32
1439 1325
1440 1326 downgrade
1441 1327
1442 1328 $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup > /dev/null
1443 1329 $ hg debugformat -v
1444 1330 format-variant repo config default
1445 1331 fncache: yes yes yes
1446 1332 dotencode: yes yes yes
1447 1333 generaldelta: yes yes yes
1448 1334 sparserevlog: yes yes yes
1449 1335 sidedata: no no no
1450 1336 copies-sdc: no no no
1451 1337 plain-cl-delta: yes yes yes
1452 1338 compression: zstd zstd zlib (zstd !)
1453 1339 compression: zlib zlib zlib (no-zstd !)
1454 1340 compression-level: default default default
1455 1341 $ cat .hg/requires
1456 1342 dotencode
1457 1343 fncache
1458 1344 generaldelta
1459 1345 revlog-compression-zstd (zstd !)
1460 1346 revlogv1
1461 1347 sparserevlog
1462 1348 store
1463 1349 $ hg debugsidedata -c 0
1464 1350
1465 1351 upgrade from hgrc
1466 1352
1467 1353 $ cat >> .hg/hgrc << EOF
1468 1354 > [format]
1469 1355 > exp-use-side-data=yes
1470 1356 > EOF
1471 1357 $ hg debugupgraderepo --run --no-backup > /dev/null
1472 1358 $ hg debugformat -v
1473 1359 format-variant repo config default
1474 1360 fncache: yes yes yes
1475 1361 dotencode: yes yes yes
1476 1362 generaldelta: yes yes yes
1477 1363 sparserevlog: yes yes yes
1478 1364 sidedata: yes yes no
1479 1365 copies-sdc: no no no
1480 1366 plain-cl-delta: yes yes yes
1481 1367 compression: zstd zstd zlib (zstd !)
1482 1368 compression: zlib zlib zlib (no-zstd !)
1483 1369 compression-level: default default default
1484 1370 $ cat .hg/requires
1485 1371 dotencode
1486 1372 exp-sidedata-flag
1487 1373 fncache
1488 1374 generaldelta
1489 1375 revlog-compression-zstd (zstd !)
1490 1376 revlogv1
1491 1377 sparserevlog
1492 1378 store
1493 1379 $ hg debugsidedata -c 0