cleanup: say goodbye to manifestv2 format...
Augie Fackler
r36391:0147a473 default
@@ -1,1311 +1,1308 @@
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18 def loadconfigtable(ui, extname, configtable):
19 19 """update config item known to the ui with the extension ones"""
20 20 for section, items in sorted(configtable.items()):
21 21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 22 knownkeys = set(knownitems)
23 23 newkeys = set(items)
24 24 for key in sorted(knownkeys & newkeys):
25 25 msg = "extension '%s' overwrite config item '%s.%s'"
26 26 msg %= (extname, section, key)
27 27 ui.develwarn(msg, config='warn-config')
28 28
29 29 knownitems.update(items)
30 30
31 31 class configitem(object):
32 32 """represent a known config item
33 33
34 34 :section: the official config section where to find this item,
35 35 :name: the official name within the section,
36 36 :default: default value for this item,
37 37 :alias: optional list of tuples as alternatives,
38 38 :generic: this is a generic definition; the name is matched using a regular expression.
39 39 """
40 40
41 41 def __init__(self, section, name, default=None, alias=(),
42 42 generic=False, priority=0):
43 43 self.section = section
44 44 self.name = name
45 45 self.default = default
46 46 self.alias = list(alias)
47 47 self.generic = generic
48 48 self.priority = priority
49 49 self._re = None
50 50 if generic:
51 51 self._re = re.compile(self.name)
52 52
53 53 class itemregister(dict):
54 54 """A specialized dictionary that can handle wild-card selection"""
55 55
56 56 def __init__(self):
57 57 super(itemregister, self).__init__()
58 58 self._generics = set()
59 59
60 60 def update(self, other):
61 61 super(itemregister, self).update(other)
62 62 self._generics.update(other._generics)
63 63
64 64 def __setitem__(self, key, item):
65 65 super(itemregister, self).__setitem__(key, item)
66 66 if item.generic:
67 67 self._generics.add(item)
68 68
69 69 def get(self, key):
70 70 baseitem = super(itemregister, self).get(key)
71 71 if baseitem is not None and not baseitem.generic:
72 72 return baseitem
73 73
74 74 # search for a matching generic item
75 75 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
76 76 for item in generics:
77 77 # we use 'match' instead of 'search' to make the matching simpler
78 78 # for people unfamiliar with regular expressions. Having the match
79 79 # rooted to the start of the string produces less surprising
80 80 # results for users writing a simple regex for a sub-attribute.
81 81 #
82 82 # For example, using "color\..*" with match produces an unsurprising
83 83 # result, while using search could suddenly match apparently
84 84 # unrelated configuration that happens to contain "color."
85 85 # anywhere. This is a tradeoff where we favor requiring ".*" on
86 86 # some matches to avoid the need to prefix most patterns with "^".
87 87 # The "^" seems more error prone.
88 88 if item._re.match(key):
89 89 return item
90 90
91 91 return None
92 92
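As a small standalone illustration of the rooted-match behavior described in the comment above (standard `re` module only; the config keys here are hypothetical):

    import re

    pattern = re.compile(r'color\..*')
    assert pattern.match('color.mode')          # anchored: key starts with "color."
    assert not pattern.match('ui.color.mode')   # match() refuses a mid-string hit
    # search() would also accept 'ui.color.mode' -- the surprise match() avoids:
    assert re.search(r'color\..*', 'ui.color.mode')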
93 93 coreitems = {}
94 94
95 95 def _register(configtable, *args, **kwargs):
96 96 item = configitem(*args, **kwargs)
97 97 section = configtable.setdefault(item.section, itemregister())
98 98 if item.name in section:
99 99 msg = "duplicated config item registration for '%s.%s'"
100 100 raise error.ProgrammingError(msg % (item.section, item.name))
101 101 section[item.name] = item
102 102
103 103 # special value for case where the default is derived from other values
104 104 dynamicdefault = object()
105 105
106 106 # Registering actual config items
107 107
108 108 def getitemregister(configtable):
109 109 f = functools.partial(_register, configtable)
110 110 # export pseudo enum as configitem.*
111 111 f.dynamicdefault = dynamicdefault
112 112 return f
113 113
114 114 coreconfigitem = getitemregister(coreitems)
115 115
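And a hedged sketch of a registration/lookup round trip through the machinery above (hypothetical table and keys; assumes this module is importable as `mercurial.configitems`):

    from mercurial import configitems

    table = {}
    register = configitems.getitemregister(table)
    register('pager', 'attend-.*', default=False, generic=True, priority=-1)

    # the exact key is absent, so itemregister.get falls back to the
    # generic pattern registered above
    item = table['pager'].get('attend-status')
    assert item is not None and item.default is False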
116 116 coreconfigitem('alias', '.*',
117 117 default=None,
118 118 generic=True,
119 119 )
120 120 coreconfigitem('annotate', 'nodates',
121 121 default=False,
122 122 )
123 123 coreconfigitem('annotate', 'showfunc',
124 124 default=False,
125 125 )
126 126 coreconfigitem('annotate', 'unified',
127 127 default=None,
128 128 )
129 129 coreconfigitem('annotate', 'git',
130 130 default=False,
131 131 )
132 132 coreconfigitem('annotate', 'ignorews',
133 133 default=False,
134 134 )
135 135 coreconfigitem('annotate', 'ignorewsamount',
136 136 default=False,
137 137 )
138 138 coreconfigitem('annotate', 'ignoreblanklines',
139 139 default=False,
140 140 )
141 141 coreconfigitem('annotate', 'ignorewseol',
142 142 default=False,
143 143 )
144 144 coreconfigitem('annotate', 'nobinary',
145 145 default=False,
146 146 )
147 147 coreconfigitem('annotate', 'noprefix',
148 148 default=False,
149 149 )
150 150 coreconfigitem('auth', 'cookiefile',
151 151 default=None,
152 152 )
153 153 # bookmarks.pushing: internal hack for discovery
154 154 coreconfigitem('bookmarks', 'pushing',
155 155 default=list,
156 156 )
157 157 # bundle.mainreporoot: internal hack for bundlerepo
158 158 coreconfigitem('bundle', 'mainreporoot',
159 159 default='',
160 160 )
161 161 # bundle.reorder: experimental config
162 162 coreconfigitem('bundle', 'reorder',
163 163 default='auto',
164 164 )
165 165 coreconfigitem('censor', 'policy',
166 166 default='abort',
167 167 )
168 168 coreconfigitem('chgserver', 'idletimeout',
169 169 default=3600,
170 170 )
171 171 coreconfigitem('chgserver', 'skiphash',
172 172 default=False,
173 173 )
174 174 coreconfigitem('cmdserver', 'log',
175 175 default=None,
176 176 )
177 177 coreconfigitem('color', '.*',
178 178 default=None,
179 179 generic=True,
180 180 )
181 181 coreconfigitem('color', 'mode',
182 182 default='auto',
183 183 )
184 184 coreconfigitem('color', 'pagermode',
185 185 default=dynamicdefault,
186 186 )
187 187 coreconfigitem('commands', 'show.aliasprefix',
188 188 default=list,
189 189 )
190 190 coreconfigitem('commands', 'status.relative',
191 191 default=False,
192 192 )
193 193 coreconfigitem('commands', 'status.skipstates',
194 194 default=[],
195 195 )
196 196 coreconfigitem('commands', 'status.verbose',
197 197 default=False,
198 198 )
199 199 coreconfigitem('commands', 'update.check',
200 200 default=None,
201 201 # Deprecated, remove after 4.4 release
202 202 alias=[('experimental', 'updatecheck')]
203 203 )
204 204 coreconfigitem('commands', 'update.requiredest',
205 205 default=False,
206 206 )
207 207 coreconfigitem('committemplate', '.*',
208 208 default=None,
209 209 generic=True,
210 210 )
211 211 coreconfigitem('convert', 'cvsps.cache',
212 212 default=True,
213 213 )
214 214 coreconfigitem('convert', 'cvsps.fuzz',
215 215 default=60,
216 216 )
217 217 coreconfigitem('convert', 'cvsps.logencoding',
218 218 default=None,
219 219 )
220 220 coreconfigitem('convert', 'cvsps.mergefrom',
221 221 default=None,
222 222 )
223 223 coreconfigitem('convert', 'cvsps.mergeto',
224 224 default=None,
225 225 )
226 226 coreconfigitem('convert', 'git.committeractions',
227 227 default=lambda: ['messagedifferent'],
228 228 )
229 229 coreconfigitem('convert', 'git.extrakeys',
230 230 default=list,
231 231 )
232 232 coreconfigitem('convert', 'git.findcopiesharder',
233 233 default=False,
234 234 )
235 235 coreconfigitem('convert', 'git.remoteprefix',
236 236 default='remote',
237 237 )
238 238 coreconfigitem('convert', 'git.renamelimit',
239 239 default=400,
240 240 )
241 241 coreconfigitem('convert', 'git.saverev',
242 242 default=True,
243 243 )
244 244 coreconfigitem('convert', 'git.similarity',
245 245 default=50,
246 246 )
247 247 coreconfigitem('convert', 'git.skipsubmodules',
248 248 default=False,
249 249 )
250 250 coreconfigitem('convert', 'hg.clonebranches',
251 251 default=False,
252 252 )
253 253 coreconfigitem('convert', 'hg.ignoreerrors',
254 254 default=False,
255 255 )
256 256 coreconfigitem('convert', 'hg.revs',
257 257 default=None,
258 258 )
259 259 coreconfigitem('convert', 'hg.saverev',
260 260 default=False,
261 261 )
262 262 coreconfigitem('convert', 'hg.sourcename',
263 263 default=None,
264 264 )
265 265 coreconfigitem('convert', 'hg.startrev',
266 266 default=None,
267 267 )
268 268 coreconfigitem('convert', 'hg.tagsbranch',
269 269 default='default',
270 270 )
271 271 coreconfigitem('convert', 'hg.usebranchnames',
272 272 default=True,
273 273 )
274 274 coreconfigitem('convert', 'ignoreancestorcheck',
275 275 default=False,
276 276 )
277 277 coreconfigitem('convert', 'localtimezone',
278 278 default=False,
279 279 )
280 280 coreconfigitem('convert', 'p4.encoding',
281 281 default=dynamicdefault,
282 282 )
283 283 coreconfigitem('convert', 'p4.startrev',
284 284 default=0,
285 285 )
286 286 coreconfigitem('convert', 'skiptags',
287 287 default=False,
288 288 )
289 289 coreconfigitem('convert', 'svn.debugsvnlog',
290 290 default=True,
291 291 )
292 292 coreconfigitem('convert', 'svn.trunk',
293 293 default=None,
294 294 )
295 295 coreconfigitem('convert', 'svn.tags',
296 296 default=None,
297 297 )
298 298 coreconfigitem('convert', 'svn.branches',
299 299 default=None,
300 300 )
301 301 coreconfigitem('convert', 'svn.startrev',
302 302 default=0,
303 303 )
304 304 coreconfigitem('debug', 'dirstate.delaywrite',
305 305 default=0,
306 306 )
307 307 coreconfigitem('defaults', '.*',
308 308 default=None,
309 309 generic=True,
310 310 )
311 311 coreconfigitem('devel', 'all-warnings',
312 312 default=False,
313 313 )
314 314 coreconfigitem('devel', 'bundle2.debug',
315 315 default=False,
316 316 )
317 317 coreconfigitem('devel', 'cache-vfs',
318 318 default=None,
319 319 )
320 320 coreconfigitem('devel', 'check-locks',
321 321 default=False,
322 322 )
323 323 coreconfigitem('devel', 'check-relroot',
324 324 default=False,
325 325 )
326 326 coreconfigitem('devel', 'default-date',
327 327 default=None,
328 328 )
329 329 coreconfigitem('devel', 'deprec-warn',
330 330 default=False,
331 331 )
332 332 coreconfigitem('devel', 'disableloaddefaultcerts',
333 333 default=False,
334 334 )
335 335 coreconfigitem('devel', 'warn-empty-changegroup',
336 336 default=False,
337 337 )
338 338 coreconfigitem('devel', 'legacy.exchange',
339 339 default=list,
340 340 )
341 341 coreconfigitem('devel', 'servercafile',
342 342 default='',
343 343 )
344 344 coreconfigitem('devel', 'serverexactprotocol',
345 345 default='',
346 346 )
347 347 coreconfigitem('devel', 'serverrequirecert',
348 348 default=False,
349 349 )
350 350 coreconfigitem('devel', 'strip-obsmarkers',
351 351 default=True,
352 352 )
353 353 coreconfigitem('devel', 'warn-config',
354 354 default=None,
355 355 )
356 356 coreconfigitem('devel', 'warn-config-default',
357 357 default=None,
358 358 )
359 359 coreconfigitem('devel', 'user.obsmarker',
360 360 default=None,
361 361 )
362 362 coreconfigitem('devel', 'warn-config-unknown',
363 363 default=None,
364 364 )
365 365 coreconfigitem('devel', 'debug.peer-request',
366 366 default=False,
367 367 )
368 368 coreconfigitem('diff', 'nodates',
369 369 default=False,
370 370 )
371 371 coreconfigitem('diff', 'showfunc',
372 372 default=False,
373 373 )
374 374 coreconfigitem('diff', 'unified',
375 375 default=None,
376 376 )
377 377 coreconfigitem('diff', 'git',
378 378 default=False,
379 379 )
380 380 coreconfigitem('diff', 'ignorews',
381 381 default=False,
382 382 )
383 383 coreconfigitem('diff', 'ignorewsamount',
384 384 default=False,
385 385 )
386 386 coreconfigitem('diff', 'ignoreblanklines',
387 387 default=False,
388 388 )
389 389 coreconfigitem('diff', 'ignorewseol',
390 390 default=False,
391 391 )
392 392 coreconfigitem('diff', 'nobinary',
393 393 default=False,
394 394 )
395 395 coreconfigitem('diff', 'noprefix',
396 396 default=False,
397 397 )
398 398 coreconfigitem('email', 'bcc',
399 399 default=None,
400 400 )
401 401 coreconfigitem('email', 'cc',
402 402 default=None,
403 403 )
404 404 coreconfigitem('email', 'charsets',
405 405 default=list,
406 406 )
407 407 coreconfigitem('email', 'from',
408 408 default=None,
409 409 )
410 410 coreconfigitem('email', 'method',
411 411 default='smtp',
412 412 )
413 413 coreconfigitem('email', 'reply-to',
414 414 default=None,
415 415 )
416 416 coreconfigitem('email', 'to',
417 417 default=None,
418 418 )
419 419 coreconfigitem('experimental', 'archivemetatemplate',
420 420 default=dynamicdefault,
421 421 )
422 422 coreconfigitem('experimental', 'bundle-phases',
423 423 default=False,
424 424 )
425 425 coreconfigitem('experimental', 'bundle2-advertise',
426 426 default=True,
427 427 )
428 428 coreconfigitem('experimental', 'bundle2-output-capture',
429 429 default=False,
430 430 )
431 431 coreconfigitem('experimental', 'bundle2.pushback',
432 432 default=False,
433 433 )
434 434 coreconfigitem('experimental', 'bundle2.stream',
435 435 default=False,
436 436 )
437 437 coreconfigitem('experimental', 'bundle2lazylocking',
438 438 default=False,
439 439 )
440 440 coreconfigitem('experimental', 'bundlecomplevel',
441 441 default=None,
442 442 )
443 443 coreconfigitem('experimental', 'changegroup3',
444 444 default=False,
445 445 )
446 446 coreconfigitem('experimental', 'clientcompressionengines',
447 447 default=list,
448 448 )
449 449 coreconfigitem('experimental', 'copytrace',
450 450 default='on',
451 451 )
452 452 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
453 453 default=100,
454 454 )
455 455 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
456 456 default=100,
457 457 )
458 458 coreconfigitem('experimental', 'crecordtest',
459 459 default=None,
460 460 )
461 461 coreconfigitem('experimental', 'directaccess',
462 462 default=False,
463 463 )
464 464 coreconfigitem('experimental', 'directaccess.revnums',
465 465 default=False,
466 466 )
467 467 coreconfigitem('experimental', 'editortmpinhg',
468 468 default=False,
469 469 )
470 470 coreconfigitem('experimental', 'evolution',
471 471 default=list,
472 472 )
473 473 coreconfigitem('experimental', 'evolution.allowdivergence',
474 474 default=False,
475 475 alias=[('experimental', 'allowdivergence')]
476 476 )
477 477 coreconfigitem('experimental', 'evolution.allowunstable',
478 478 default=None,
479 479 )
480 480 coreconfigitem('experimental', 'evolution.createmarkers',
481 481 default=None,
482 482 )
483 483 coreconfigitem('experimental', 'evolution.effect-flags',
484 484 default=True,
485 485 alias=[('experimental', 'effect-flags')]
486 486 )
487 487 coreconfigitem('experimental', 'evolution.exchange',
488 488 default=None,
489 489 )
490 490 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
491 491 default=False,
492 492 )
493 493 coreconfigitem('experimental', 'evolution.report-instabilities',
494 494 default=True,
495 495 )
496 496 coreconfigitem('experimental', 'evolution.track-operation',
497 497 default=True,
498 498 )
499 499 coreconfigitem('experimental', 'worddiff',
500 500 default=False,
501 501 )
502 502 coreconfigitem('experimental', 'maxdeltachainspan',
503 503 default=-1,
504 504 )
505 505 coreconfigitem('experimental', 'mmapindexthreshold',
506 506 default=None,
507 507 )
508 508 coreconfigitem('experimental', 'nonnormalparanoidcheck',
509 509 default=False,
510 510 )
511 511 coreconfigitem('experimental', 'exportableenviron',
512 512 default=list,
513 513 )
514 514 coreconfigitem('experimental', 'extendedheader.index',
515 515 default=None,
516 516 )
517 517 coreconfigitem('experimental', 'extendedheader.similarity',
518 518 default=False,
519 519 )
520 520 coreconfigitem('experimental', 'format.compression',
521 521 default='zlib',
522 522 )
523 523 coreconfigitem('experimental', 'graphshorten',
524 524 default=False,
525 525 )
526 526 coreconfigitem('experimental', 'graphstyle.parent',
527 527 default=dynamicdefault,
528 528 )
529 529 coreconfigitem('experimental', 'graphstyle.missing',
530 530 default=dynamicdefault,
531 531 )
532 532 coreconfigitem('experimental', 'graphstyle.grandparent',
533 533 default=dynamicdefault,
534 534 )
535 535 coreconfigitem('experimental', 'hook-track-tags',
536 536 default=False,
537 537 )
538 538 coreconfigitem('experimental', 'httppostargs',
539 539 default=False,
540 540 )
541 coreconfigitem('experimental', 'manifestv2',
542 default=False,
543 )
544 541 coreconfigitem('experimental', 'mergedriver',
545 542 default=None,
546 543 )
547 544 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
548 545 default=False,
549 546 )
550 547 coreconfigitem('experimental', 'remotenames',
551 548 default=False,
552 549 )
553 550 coreconfigitem('experimental', 'revlogv2',
554 551 default=None,
555 552 )
556 553 coreconfigitem('experimental', 'single-head-per-branch',
557 554 default=False,
558 555 )
559 556 coreconfigitem('experimental', 'sshserver.support-v2',
560 557 default=False,
561 558 )
562 559 coreconfigitem('experimental', 'spacemovesdown',
563 560 default=False,
564 561 )
565 562 coreconfigitem('experimental', 'sparse-read',
566 563 default=False,
567 564 )
568 565 coreconfigitem('experimental', 'sparse-read.density-threshold',
569 566 default=0.25,
570 567 )
571 568 coreconfigitem('experimental', 'sparse-read.min-gap-size',
572 569 default='256K',
573 570 )
574 571 coreconfigitem('experimental', 'treemanifest',
575 572 default=False,
576 573 )
577 574 coreconfigitem('experimental', 'update.atomic-file',
578 575 default=False,
579 576 )
580 577 coreconfigitem('experimental', 'sshpeer.advertise-v2',
581 578 default=False,
582 579 )
583 580 coreconfigitem('extensions', '.*',
584 581 default=None,
585 582 generic=True,
586 583 )
587 584 coreconfigitem('extdata', '.*',
588 585 default=None,
589 586 generic=True,
590 587 )
591 588 coreconfigitem('format', 'aggressivemergedeltas',
592 589 default=False,
593 590 )
594 591 coreconfigitem('format', 'chunkcachesize',
595 592 default=None,
596 593 )
597 594 coreconfigitem('format', 'dotencode',
598 595 default=True,
599 596 )
600 597 coreconfigitem('format', 'generaldelta',
601 598 default=False,
602 599 )
603 600 coreconfigitem('format', 'manifestcachesize',
604 601 default=None,
605 602 )
606 603 coreconfigitem('format', 'maxchainlen',
607 604 default=None,
608 605 )
609 606 coreconfigitem('format', 'obsstore-version',
610 607 default=None,
611 608 )
612 609 coreconfigitem('format', 'usefncache',
613 610 default=True,
614 611 )
615 612 coreconfigitem('format', 'usegeneraldelta',
616 613 default=True,
617 614 )
618 615 coreconfigitem('format', 'usestore',
619 616 default=True,
620 617 )
621 618 coreconfigitem('fsmonitor', 'warn_when_unused',
622 619 default=True,
623 620 )
624 621 coreconfigitem('fsmonitor', 'warn_update_file_count',
625 622 default=50000,
626 623 )
627 624 coreconfigitem('hooks', '.*',
628 625 default=dynamicdefault,
629 626 generic=True,
630 627 )
631 628 coreconfigitem('hgweb-paths', '.*',
632 629 default=list,
633 630 generic=True,
634 631 )
635 632 coreconfigitem('hostfingerprints', '.*',
636 633 default=list,
637 634 generic=True,
638 635 )
639 636 coreconfigitem('hostsecurity', 'ciphers',
640 637 default=None,
641 638 )
642 639 coreconfigitem('hostsecurity', 'disabletls10warning',
643 640 default=False,
644 641 )
645 642 coreconfigitem('hostsecurity', 'minimumprotocol',
646 643 default=dynamicdefault,
647 644 )
648 645 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
649 646 default=dynamicdefault,
650 647 generic=True,
651 648 )
652 649 coreconfigitem('hostsecurity', '.*:ciphers$',
653 650 default=dynamicdefault,
654 651 generic=True,
655 652 )
656 653 coreconfigitem('hostsecurity', '.*:fingerprints$',
657 654 default=list,
658 655 generic=True,
659 656 )
660 657 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
661 658 default=None,
662 659 generic=True,
663 660 )
664 661
665 662 coreconfigitem('http_proxy', 'always',
666 663 default=False,
667 664 )
668 665 coreconfigitem('http_proxy', 'host',
669 666 default=None,
670 667 )
671 668 coreconfigitem('http_proxy', 'no',
672 669 default=list,
673 670 )
674 671 coreconfigitem('http_proxy', 'passwd',
675 672 default=None,
676 673 )
677 674 coreconfigitem('http_proxy', 'user',
678 675 default=None,
679 676 )
680 677 coreconfigitem('logtoprocess', 'commandexception',
681 678 default=None,
682 679 )
683 680 coreconfigitem('logtoprocess', 'commandfinish',
684 681 default=None,
685 682 )
686 683 coreconfigitem('logtoprocess', 'command',
687 684 default=None,
688 685 )
689 686 coreconfigitem('logtoprocess', 'develwarn',
690 687 default=None,
691 688 )
692 689 coreconfigitem('logtoprocess', 'uiblocked',
693 690 default=None,
694 691 )
695 692 coreconfigitem('merge', 'checkunknown',
696 693 default='abort',
697 694 )
698 695 coreconfigitem('merge', 'checkignored',
699 696 default='abort',
700 697 )
701 698 coreconfigitem('experimental', 'merge.checkpathconflicts',
702 699 default=False,
703 700 )
704 701 coreconfigitem('merge', 'followcopies',
705 702 default=True,
706 703 )
707 704 coreconfigitem('merge', 'on-failure',
708 705 default='continue',
709 706 )
710 707 coreconfigitem('merge', 'preferancestor',
711 708 default=lambda: ['*'],
712 709 )
713 710 coreconfigitem('merge-tools', '.*',
714 711 default=None,
715 712 generic=True,
716 713 )
717 714 coreconfigitem('merge-tools', br'.*\.args$',
718 715 default="$local $base $other",
719 716 generic=True,
720 717 priority=-1,
721 718 )
722 719 coreconfigitem('merge-tools', br'.*\.binary$',
723 720 default=False,
724 721 generic=True,
725 722 priority=-1,
726 723 )
727 724 coreconfigitem('merge-tools', br'.*\.check$',
728 725 default=list,
729 726 generic=True,
730 727 priority=-1,
731 728 )
732 729 coreconfigitem('merge-tools', br'.*\.checkchanged$',
733 730 default=False,
734 731 generic=True,
735 732 priority=-1,
736 733 )
737 734 coreconfigitem('merge-tools', br'.*\.executable$',
738 735 default=dynamicdefault,
739 736 generic=True,
740 737 priority=-1,
741 738 )
742 739 coreconfigitem('merge-tools', br'.*\.fixeol$',
743 740 default=False,
744 741 generic=True,
745 742 priority=-1,
746 743 )
747 744 coreconfigitem('merge-tools', br'.*\.gui$',
748 745 default=False,
749 746 generic=True,
750 747 priority=-1,
751 748 )
752 749 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
753 750 default='basic',
754 751 generic=True,
755 752 priority=-1,
756 753 )
757 754 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
758 755 default=dynamicdefault, # take from ui.mergemarkertemplate
759 756 generic=True,
760 757 priority=-1,
761 758 )
762 759 coreconfigitem('merge-tools', br'.*\.priority$',
763 760 default=0,
764 761 generic=True,
765 762 priority=-1,
766 763 )
767 764 coreconfigitem('merge-tools', br'.*\.premerge$',
768 765 default=dynamicdefault,
769 766 generic=True,
770 767 priority=-1,
771 768 )
772 769 coreconfigitem('merge-tools', br'.*\.symlink$',
773 770 default=False,
774 771 generic=True,
775 772 priority=-1,
776 773 )
777 774 coreconfigitem('pager', 'attend-.*',
778 775 default=dynamicdefault,
779 776 generic=True,
780 777 )
781 778 coreconfigitem('pager', 'ignore',
782 779 default=list,
783 780 )
784 781 coreconfigitem('pager', 'pager',
785 782 default=dynamicdefault,
786 783 )
787 784 coreconfigitem('patch', 'eol',
788 785 default='strict',
789 786 )
790 787 coreconfigitem('patch', 'fuzz',
791 788 default=2,
792 789 )
793 790 coreconfigitem('paths', 'default',
794 791 default=None,
795 792 )
796 793 coreconfigitem('paths', 'default-push',
797 794 default=None,
798 795 )
799 796 coreconfigitem('paths', '.*',
800 797 default=None,
801 798 generic=True,
802 799 )
803 800 coreconfigitem('phases', 'checksubrepos',
804 801 default='follow',
805 802 )
806 803 coreconfigitem('phases', 'new-commit',
807 804 default='draft',
808 805 )
809 806 coreconfigitem('phases', 'publish',
810 807 default=True,
811 808 )
812 809 coreconfigitem('profiling', 'enabled',
813 810 default=False,
814 811 )
815 812 coreconfigitem('profiling', 'format',
816 813 default='text',
817 814 )
818 815 coreconfigitem('profiling', 'freq',
819 816 default=1000,
820 817 )
821 818 coreconfigitem('profiling', 'limit',
822 819 default=30,
823 820 )
824 821 coreconfigitem('profiling', 'nested',
825 822 default=0,
826 823 )
827 824 coreconfigitem('profiling', 'output',
828 825 default=None,
829 826 )
830 827 coreconfigitem('profiling', 'showmax',
831 828 default=0.999,
832 829 )
833 830 coreconfigitem('profiling', 'showmin',
834 831 default=dynamicdefault,
835 832 )
836 833 coreconfigitem('profiling', 'sort',
837 834 default='inlinetime',
838 835 )
839 836 coreconfigitem('profiling', 'statformat',
840 837 default='hotpath',
841 838 )
842 839 coreconfigitem('profiling', 'type',
843 840 default='stat',
844 841 )
845 842 coreconfigitem('progress', 'assume-tty',
846 843 default=False,
847 844 )
848 845 coreconfigitem('progress', 'changedelay',
849 846 default=1,
850 847 )
851 848 coreconfigitem('progress', 'clear-complete',
852 849 default=True,
853 850 )
854 851 coreconfigitem('progress', 'debug',
855 852 default=False,
856 853 )
857 854 coreconfigitem('progress', 'delay',
858 855 default=3,
859 856 )
860 857 coreconfigitem('progress', 'disable',
861 858 default=False,
862 859 )
863 860 coreconfigitem('progress', 'estimateinterval',
864 861 default=60.0,
865 862 )
866 863 coreconfigitem('progress', 'format',
867 864 default=lambda: ['topic', 'bar', 'number', 'estimate'],
868 865 )
869 866 coreconfigitem('progress', 'refresh',
870 867 default=0.1,
871 868 )
872 869 coreconfigitem('progress', 'width',
873 870 default=dynamicdefault,
874 871 )
875 872 coreconfigitem('push', 'pushvars.server',
876 873 default=False,
877 874 )
878 875 coreconfigitem('server', 'bookmarks-pushkey-compat',
879 876 default=True,
880 877 )
881 878 coreconfigitem('server', 'bundle1',
882 879 default=True,
883 880 )
884 881 coreconfigitem('server', 'bundle1gd',
885 882 default=None,
886 883 )
887 884 coreconfigitem('server', 'bundle1.pull',
888 885 default=None,
889 886 )
890 887 coreconfigitem('server', 'bundle1gd.pull',
891 888 default=None,
892 889 )
893 890 coreconfigitem('server', 'bundle1.push',
894 891 default=None,
895 892 )
896 893 coreconfigitem('server', 'bundle1gd.push',
897 894 default=None,
898 895 )
899 896 coreconfigitem('server', 'compressionengines',
900 897 default=list,
901 898 )
902 899 coreconfigitem('server', 'concurrent-push-mode',
903 900 default='strict',
904 901 )
905 902 coreconfigitem('server', 'disablefullbundle',
906 903 default=False,
907 904 )
908 905 coreconfigitem('server', 'maxhttpheaderlen',
909 906 default=1024,
910 907 )
911 908 coreconfigitem('server', 'preferuncompressed',
912 909 default=False,
913 910 )
914 911 coreconfigitem('server', 'uncompressed',
915 912 default=True,
916 913 )
917 914 coreconfigitem('server', 'uncompressedallowsecret',
918 915 default=False,
919 916 )
920 917 coreconfigitem('server', 'validate',
921 918 default=False,
922 919 )
923 920 coreconfigitem('server', 'zliblevel',
924 921 default=-1,
925 922 )
926 923 coreconfigitem('share', 'pool',
927 924 default=None,
928 925 )
929 926 coreconfigitem('share', 'poolnaming',
930 927 default='identity',
931 928 )
932 929 coreconfigitem('smtp', 'host',
933 930 default=None,
934 931 )
935 932 coreconfigitem('smtp', 'local_hostname',
936 933 default=None,
937 934 )
938 935 coreconfigitem('smtp', 'password',
939 936 default=None,
940 937 )
941 938 coreconfigitem('smtp', 'port',
942 939 default=dynamicdefault,
943 940 )
944 941 coreconfigitem('smtp', 'tls',
945 942 default='none',
946 943 )
947 944 coreconfigitem('smtp', 'username',
948 945 default=None,
949 946 )
950 947 coreconfigitem('sparse', 'missingwarning',
951 948 default=True,
952 949 )
953 950 coreconfigitem('subrepos', 'allowed',
954 951 default=dynamicdefault, # to make backporting simpler
955 952 )
956 953 coreconfigitem('subrepos', 'hg:allowed',
957 954 default=dynamicdefault,
958 955 )
959 956 coreconfigitem('subrepos', 'git:allowed',
960 957 default=dynamicdefault,
961 958 )
962 959 coreconfigitem('subrepos', 'svn:allowed',
963 960 default=dynamicdefault,
964 961 )
965 962 coreconfigitem('templates', '.*',
966 963 default=None,
967 964 generic=True,
968 965 )
969 966 coreconfigitem('trusted', 'groups',
970 967 default=list,
971 968 )
972 969 coreconfigitem('trusted', 'users',
973 970 default=list,
974 971 )
975 972 coreconfigitem('ui', '_usedassubrepo',
976 973 default=False,
977 974 )
978 975 coreconfigitem('ui', 'allowemptycommit',
979 976 default=False,
980 977 )
981 978 coreconfigitem('ui', 'archivemeta',
982 979 default=True,
983 980 )
984 981 coreconfigitem('ui', 'askusername',
985 982 default=False,
986 983 )
987 984 coreconfigitem('ui', 'clonebundlefallback',
988 985 default=False,
989 986 )
990 987 coreconfigitem('ui', 'clonebundleprefers',
991 988 default=list,
992 989 )
993 990 coreconfigitem('ui', 'clonebundles',
994 991 default=True,
995 992 )
996 993 coreconfigitem('ui', 'color',
997 994 default='auto',
998 995 )
999 996 coreconfigitem('ui', 'commitsubrepos',
1000 997 default=False,
1001 998 )
1002 999 coreconfigitem('ui', 'debug',
1003 1000 default=False,
1004 1001 )
1005 1002 coreconfigitem('ui', 'debugger',
1006 1003 default=None,
1007 1004 )
1008 1005 coreconfigitem('ui', 'editor',
1009 1006 default=dynamicdefault,
1010 1007 )
1011 1008 coreconfigitem('ui', 'fallbackencoding',
1012 1009 default=None,
1013 1010 )
1014 1011 coreconfigitem('ui', 'forcecwd',
1015 1012 default=None,
1016 1013 )
1017 1014 coreconfigitem('ui', 'forcemerge',
1018 1015 default=None,
1019 1016 )
1020 1017 coreconfigitem('ui', 'formatdebug',
1021 1018 default=False,
1022 1019 )
1023 1020 coreconfigitem('ui', 'formatjson',
1024 1021 default=False,
1025 1022 )
1026 1023 coreconfigitem('ui', 'formatted',
1027 1024 default=None,
1028 1025 )
1029 1026 coreconfigitem('ui', 'graphnodetemplate',
1030 1027 default=None,
1031 1028 )
1032 1029 coreconfigitem('ui', 'http2debuglevel',
1033 1030 default=None,
1034 1031 )
1035 1032 coreconfigitem('ui', 'interactive',
1036 1033 default=None,
1037 1034 )
1038 1035 coreconfigitem('ui', 'interface',
1039 1036 default=None,
1040 1037 )
1041 1038 coreconfigitem('ui', 'interface.chunkselector',
1042 1039 default=None,
1043 1040 )
1044 1041 coreconfigitem('ui', 'logblockedtimes',
1045 1042 default=False,
1046 1043 )
1047 1044 coreconfigitem('ui', 'logtemplate',
1048 1045 default=None,
1049 1046 )
1050 1047 coreconfigitem('ui', 'merge',
1051 1048 default=None,
1052 1049 )
1053 1050 coreconfigitem('ui', 'mergemarkers',
1054 1051 default='basic',
1055 1052 )
1056 1053 coreconfigitem('ui', 'mergemarkertemplate',
1057 1054 default=('{node|short} '
1058 1055 '{ifeq(tags, "tip", "", '
1059 1056 'ifeq(tags, "", "", "{tags} "))}'
1060 1057 '{if(bookmarks, "{bookmarks} ")}'
1061 1058 '{ifeq(branch, "default", "", "{branch} ")}'
1062 1059 '- {author|user}: {desc|firstline}')
1063 1060 )
1064 1061 coreconfigitem('ui', 'nontty',
1065 1062 default=False,
1066 1063 )
1067 1064 coreconfigitem('ui', 'origbackuppath',
1068 1065 default=None,
1069 1066 )
1070 1067 coreconfigitem('ui', 'paginate',
1071 1068 default=True,
1072 1069 )
1073 1070 coreconfigitem('ui', 'patch',
1074 1071 default=None,
1075 1072 )
1076 1073 coreconfigitem('ui', 'portablefilenames',
1077 1074 default='warn',
1078 1075 )
1079 1076 coreconfigitem('ui', 'promptecho',
1080 1077 default=False,
1081 1078 )
1082 1079 coreconfigitem('ui', 'quiet',
1083 1080 default=False,
1084 1081 )
1085 1082 coreconfigitem('ui', 'quietbookmarkmove',
1086 1083 default=False,
1087 1084 )
1088 1085 coreconfigitem('ui', 'remotecmd',
1089 1086 default='hg',
1090 1087 )
1091 1088 coreconfigitem('ui', 'report_untrusted',
1092 1089 default=True,
1093 1090 )
1094 1091 coreconfigitem('ui', 'rollback',
1095 1092 default=True,
1096 1093 )
1097 1094 coreconfigitem('ui', 'slash',
1098 1095 default=False,
1099 1096 )
1100 1097 coreconfigitem('ui', 'ssh',
1101 1098 default='ssh',
1102 1099 )
1103 1100 coreconfigitem('ui', 'ssherrorhint',
1104 1101 default=None,
1105 1102 )
1106 1103 coreconfigitem('ui', 'statuscopies',
1107 1104 default=False,
1108 1105 )
1109 1106 coreconfigitem('ui', 'strict',
1110 1107 default=False,
1111 1108 )
1112 1109 coreconfigitem('ui', 'style',
1113 1110 default='',
1114 1111 )
1115 1112 coreconfigitem('ui', 'supportcontact',
1116 1113 default=None,
1117 1114 )
1118 1115 coreconfigitem('ui', 'textwidth',
1119 1116 default=78,
1120 1117 )
1121 1118 coreconfigitem('ui', 'timeout',
1122 1119 default='600',
1123 1120 )
1124 1121 coreconfigitem('ui', 'timeout.warn',
1125 1122 default=0,
1126 1123 )
1127 1124 coreconfigitem('ui', 'traceback',
1128 1125 default=False,
1129 1126 )
1130 1127 coreconfigitem('ui', 'tweakdefaults',
1131 1128 default=False,
1132 1129 )
1133 1130 coreconfigitem('ui', 'usehttp2',
1134 1131 default=False,
1135 1132 )
1136 1133 coreconfigitem('ui', 'username',
1137 1134 alias=[('ui', 'user')]
1138 1135 )
1139 1136 coreconfigitem('ui', 'verbose',
1140 1137 default=False,
1141 1138 )
1142 1139 coreconfigitem('verify', 'skipflags',
1143 1140 default=None,
1144 1141 )
1145 1142 coreconfigitem('web', 'allowbz2',
1146 1143 default=False,
1147 1144 )
1148 1145 coreconfigitem('web', 'allowgz',
1149 1146 default=False,
1150 1147 )
1151 1148 coreconfigitem('web', 'allow-pull',
1152 1149 alias=[('web', 'allowpull')],
1153 1150 default=True,
1154 1151 )
1155 1152 coreconfigitem('web', 'allow-push',
1156 1153 alias=[('web', 'allow_push')],
1157 1154 default=list,
1158 1155 )
1159 1156 coreconfigitem('web', 'allowzip',
1160 1157 default=False,
1161 1158 )
1162 1159 coreconfigitem('web', 'archivesubrepos',
1163 1160 default=False,
1164 1161 )
1165 1162 coreconfigitem('web', 'cache',
1166 1163 default=True,
1167 1164 )
1168 1165 coreconfigitem('web', 'contact',
1169 1166 default=None,
1170 1167 )
1171 1168 coreconfigitem('web', 'deny_push',
1172 1169 default=list,
1173 1170 )
1174 1171 coreconfigitem('web', 'guessmime',
1175 1172 default=False,
1176 1173 )
1177 1174 coreconfigitem('web', 'hidden',
1178 1175 default=False,
1179 1176 )
1180 1177 coreconfigitem('web', 'labels',
1181 1178 default=list,
1182 1179 )
1183 1180 coreconfigitem('web', 'logoimg',
1184 1181 default='hglogo.png',
1185 1182 )
1186 1183 coreconfigitem('web', 'logourl',
1187 1184 default='https://mercurial-scm.org/',
1188 1185 )
1189 1186 coreconfigitem('web', 'accesslog',
1190 1187 default='-',
1191 1188 )
1192 1189 coreconfigitem('web', 'address',
1193 1190 default='',
1194 1191 )
1195 1192 coreconfigitem('web', 'allow_archive',
1196 1193 default=list,
1197 1194 )
1198 1195 coreconfigitem('web', 'allow_read',
1199 1196 default=list,
1200 1197 )
1201 1198 coreconfigitem('web', 'baseurl',
1202 1199 default=None,
1203 1200 )
1204 1201 coreconfigitem('web', 'cacerts',
1205 1202 default=None,
1206 1203 )
1207 1204 coreconfigitem('web', 'certificate',
1208 1205 default=None,
1209 1206 )
1210 1207 coreconfigitem('web', 'collapse',
1211 1208 default=False,
1212 1209 )
1213 1210 coreconfigitem('web', 'csp',
1214 1211 default=None,
1215 1212 )
1216 1213 coreconfigitem('web', 'deny_read',
1217 1214 default=list,
1218 1215 )
1219 1216 coreconfigitem('web', 'descend',
1220 1217 default=True,
1221 1218 )
1222 1219 coreconfigitem('web', 'description',
1223 1220 default="",
1224 1221 )
1225 1222 coreconfigitem('web', 'encoding',
1226 1223 default=lambda: encoding.encoding,
1227 1224 )
1228 1225 coreconfigitem('web', 'errorlog',
1229 1226 default='-',
1230 1227 )
1231 1228 coreconfigitem('web', 'ipv6',
1232 1229 default=False,
1233 1230 )
1234 1231 coreconfigitem('web', 'maxchanges',
1235 1232 default=10,
1236 1233 )
1237 1234 coreconfigitem('web', 'maxfiles',
1238 1235 default=10,
1239 1236 )
1240 1237 coreconfigitem('web', 'maxshortchanges',
1241 1238 default=60,
1242 1239 )
1243 1240 coreconfigitem('web', 'motd',
1244 1241 default='',
1245 1242 )
1246 1243 coreconfigitem('web', 'name',
1247 1244 default=dynamicdefault,
1248 1245 )
1249 1246 coreconfigitem('web', 'port',
1250 1247 default=8000,
1251 1248 )
1252 1249 coreconfigitem('web', 'prefix',
1253 1250 default='',
1254 1251 )
1255 1252 coreconfigitem('web', 'push_ssl',
1256 1253 default=True,
1257 1254 )
1258 1255 coreconfigitem('web', 'refreshinterval',
1259 1256 default=20,
1260 1257 )
1261 1258 coreconfigitem('web', 'staticurl',
1262 1259 default=None,
1263 1260 )
1264 1261 coreconfigitem('web', 'stripes',
1265 1262 default=1,
1266 1263 )
1267 1264 coreconfigitem('web', 'style',
1268 1265 default='paper',
1269 1266 )
1270 1267 coreconfigitem('web', 'templates',
1271 1268 default=None,
1272 1269 )
1273 1270 coreconfigitem('web', 'view',
1274 1271 default='served',
1275 1272 )
1276 1273 coreconfigitem('worker', 'backgroundclose',
1277 1274 default=dynamicdefault,
1278 1275 )
1279 1276 # Windows defaults to a limit of 512 open files. A buffer of 128
1280 1277 # should give us enough headway.
1281 1278 coreconfigitem('worker', 'backgroundclosemaxqueue',
1282 1279 default=384,
1283 1280 )
1284 1281 coreconfigitem('worker', 'backgroundcloseminfilecount',
1285 1282 default=2048,
1286 1283 )
1287 1284 coreconfigitem('worker', 'backgroundclosethreadcount',
1288 1285 default=4,
1289 1286 )
1290 1287 coreconfigitem('worker', 'enabled',
1291 1288 default=True,
1292 1289 )
1293 1290 coreconfigitem('worker', 'numcpus',
1294 1291 default=None,
1295 1292 )
1296 1293
1297 1294 # Rebase related configuration moved to core because other extensions are doing
1298 1295 # strange things. For example, shelve imports the extension to reuse some bits
1299 1296 # without formally loading it.
1300 1297 coreconfigitem('commands', 'rebase.requiredest',
1301 1298 default=False,
1302 1299 )
1303 1300 coreconfigitem('experimental', 'rebaseskipobsolete',
1304 1301 default=True,
1305 1302 )
1306 1303 coreconfigitem('rebase', 'singletransaction',
1307 1304 default=False,
1308 1305 )
1309 1306 coreconfigitem('rebase', 'experimental.inmemory',
1310 1307 default=False,
1311 1308 )
@@ -1,130 +1,131 @@
1
2 1 Repositories contain a file (``.hg/requires``) containing a list of
3 2 features/capabilities that are *required* for clients to interface
4 3 with the repository. This file has been present in Mercurial since
5 4 version 0.9.2 (released December 2006).
6 5
7 6 One of the first things clients do when opening a repository is read
8 7 ``.hg/requires`` and verify that all listed requirements are supported,
9 8 aborting if not. Requirements are therefore a strong mechanism to
10 9 prevent incompatible clients from reading from unknown repository
11 10 formats or even corrupting them by writing to them.
12 11
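A minimal sketch of that startup check (hypothetical code, not Mercurial's
actual implementation; the real reading logic lives in
``scmutil.readrequires``)::

    import os

    def checkrequirements(repopath, supported):
        # one requirement per line in .hg/requires
        with open(os.path.join(repopath, '.hg', 'requires')) as fp:
            requirements = set(line.strip() for line in fp if line.strip())
        missing = requirements - supported
        if missing:
            raise RuntimeError('repository requires unknown features: %s'
                               % ', '.join(sorted(missing)))
        return requirements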
13 12 Extensions may add requirements. When they do this, clients not running
14 13 an extension will be unable to read from repositories.
15 14
16 15 The following sections describe the requirements defined by the
17 16 Mercurial core distribution.
18 17
19 18 revlogv1
20 19 ========
21 20
22 21 When present, revlogs are version 1 (RevlogNG). RevlogNG was introduced
23 22 in 2006. The ``revlogv1`` requirement has been enabled by default
24 23 since the ``requires`` file was introduced in Mercurial 0.9.2.
25 24
26 25 If this requirement is not present, version 0 revlogs are assumed.
27 26
28 27 store
29 28 =====
30 29
31 30 The *store* repository layout should be used.
32 31
33 32 This requirement has been enabled by default since the ``requires`` file
34 33 was introduced in Mercurial 0.9.2.
35 34
36 35 fncache
37 36 =======
38 37
39 38 The *fncache* repository layout should be used.
40 39
41 40 The *fncache* layout hash encodes filenames with long paths and
42 41 encodes reserved filenames.
43 42
44 43 This requirement is enabled by default when the *store* requirement is
45 44 enabled (which is the default behavior). It was introduced in Mercurial
46 45 1.1 (released December 2008).
47 46
48 47 shared
49 48 ======
50 49
51 50 Denotes that the store for a repository is shared from another location
52 51 (defined by the ``.hg/sharedpath`` file).
53 52
54 53 This requirement is set when a repository is created via :hg:`share`.
55 54
56 55 The requirement was added in Mercurial 1.3 (released July 2009).
57 56
58 57 relshared
59 58 =========
60 59
61 60 Derivative of ``shared``; the location of the store is relative to the
62 61 store of this repository.
63 62
64 63 This requirement is set when a repository is created via :hg:`share`
65 64 using the ``--relative`` option.
66 65
67 66 The requirement was added in Mercurial 4.2 (released May 2017).
68 67
69 68 dotencode
70 69 =========
71 70
72 71 The *dotencode* repository layout should be used.
73 72
74 73 The *dotencode* layout encodes the first period or space in filenames
75 74 to prevent issues on OS X and Windows.
76 75
77 76 This requirement is enabled by default when the *store* requirement
78 77 is enabled (which is the default behavior). It was introduced in
79 78 Mercurial 1.7 (released November 2010).
80 79
81 80 parentdelta
82 81 ===========
83 82
84 83 Denotes a revlog delta encoding format that was experimental and
85 84 replaced by *generaldelta*. It should not be seen in the wild because
86 85 it was never enabled by default.
87 86
88 87 This requirement was added in Mercurial 1.7 and removed in Mercurial
89 88 1.9.
90 89
91 90 generaldelta
92 91 ============
93 92
94 93 Revlogs should be created with the *generaldelta* flag enabled. The
95 94 generaldelta flag will cause deltas to be encoded against a parent
96 95 revision instead of the previous revision in the revlog.
97 96
98 97 Support for this requirement was added in Mercurial 1.9 (released
99 98 July 2011). The requirement was disabled on new repositories by
100 99 default until Mercurial 3.7 (released February 2016).
101 100
102 101 manifestv2
103 102 ==========
104 103
105 104 Denotes that version 2 of manifests are being used.
106 105
107 106 Support for this requirement was added in Mercurial 3.4 (released
108 May 2015). The requirement is currently experimental and is disabled
109 by default.
107 May 2015). The new format failed to meet expectations, and support
108 for the format and requirement was removed in Mercurial 4.6
109 (released May 2018), since the feature never graduated from
110 experimental status.
110 111
111 112 treemanifest
112 113 ============
113 114
114 115 Denotes that tree manifests are being used. Tree manifests are
115 116 one manifest per directory (as opposed to a single flat manifest).
116 117
117 118 Support for this requirement was added in Mercurial 3.4 (released
118 119 May 2015). The requirement is currently experimental and is
119 120 disabled by default.
120 121
121 122 exp-sparse
122 123 ==========
123 124
124 125 The working directory is sparse (only contains a subset of files).
125 126
126 127 Support for this requirement was added in Mercurial 4.3 (released
127 128 August 2017). This requirement and feature are experimental and may
128 129 disappear in a future Mercurial release. The requirement will only
129 130 be present on repositories that have opted in to a sparse working
130 131 directory.
@@ -1,2274 +1,2275 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 hex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from . import (
24 24 bookmarks,
25 25 branchmap,
26 26 bundle2,
27 27 changegroup,
28 28 changelog,
29 29 color,
30 30 context,
31 31 dirstate,
32 32 dirstateguard,
33 33 discovery,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 obsolete,
47 47 pathutil,
48 48 peer,
49 49 phases,
50 50 pushkey,
51 51 pycompat,
52 52 repository,
53 53 repoview,
54 54 revset,
55 55 revsetlang,
56 56 scmutil,
57 57 sparse,
58 58 store,
59 59 subrepoutil,
60 60 tags as tagsmod,
61 61 transaction,
62 62 txnutil,
63 63 util,
64 64 vfs as vfsmod,
65 65 )
66 66
67 67 release = lockmod.release
68 68 urlerr = util.urlerr
69 69 urlreq = util.urlreq
70 70
71 71 # set of (path, vfs-location) tuples. vfs-location is:
72 72 # - 'plain' for vfs relative paths
73 73 # - '' for svfs relative paths
74 74 _cachedfiles = set()
75 75
76 76 class _basefilecache(scmutil.filecache):
77 77 """All filecache usage on repo are done for logic that should be unfiltered
78 78 """
79 79 def __get__(self, repo, type=None):
80 80 if repo is None:
81 81 return self
82 82 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
83 83 def __set__(self, repo, value):
84 84 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
85 85 def __delete__(self, repo):
86 86 return super(_basefilecache, self).__delete__(repo.unfiltered())
87 87
88 88 class repofilecache(_basefilecache):
89 89 """filecache for files in .hg but outside of .hg/store"""
90 90 def __init__(self, *paths):
91 91 super(repofilecache, self).__init__(*paths)
92 92 for path in paths:
93 93 _cachedfiles.add((path, 'plain'))
94 94
95 95 def join(self, obj, fname):
96 96 return obj.vfs.join(fname)
97 97
98 98 class storecache(_basefilecache):
99 99 """filecache for files in the store"""
100 100 def __init__(self, *paths):
101 101 super(storecache, self).__init__(*paths)
102 102 for path in paths:
103 103 _cachedfiles.add((path, ''))
104 104
105 105 def join(self, obj, fname):
106 106 return obj.sjoin(fname)
107 107
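As a hedged sketch, this is how these two decorators are typically applied on the repository class (modeled loosely on real properties such as `_bookmarks` and `changelog`; simplified here):

    class examplerepo(localrepository):
        @repofilecache('bookmarks')
        def _bookmarks(self):
            # recomputed only when .hg/bookmarks changes on disk
            return bookmarks.bmstore(self)

        @storecache('00changelog.i')
        def changelog(self):
            # recomputed only when the store's changelog index changes
            return changelog.changelog(self.svfs)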
108 108 def isfilecached(repo, name):
109 109 """check if a repo has already cached "name" filecache-ed property
110 110
111 111 This returns (cachedobj-or-None, iscached) tuple.
112 112 """
113 113 cacheentry = repo.unfiltered()._filecache.get(name, None)
114 114 if not cacheentry:
115 115 return None, False
116 116 return cacheentry.obj, True
117 117
118 118 class unfilteredpropertycache(util.propertycache):
119 119 """propertycache that apply to unfiltered repo only"""
120 120
121 121 def __get__(self, repo, type=None):
122 122 unfi = repo.unfiltered()
123 123 if unfi is repo:
124 124 return super(unfilteredpropertycache, self).__get__(unfi)
125 125 return getattr(unfi, self.name)
126 126
127 127 class filteredpropertycache(util.propertycache):
128 128 """propertycache that must take filtering in account"""
129 129
130 130 def cachevalue(self, obj, value):
131 131 object.__setattr__(obj, self.name, value)
132 132
133 133
134 134 def hasunfilteredcache(repo, name):
135 135 """check if a repo has an unfilteredpropertycache value for <name>"""
136 136 return name in vars(repo.unfiltered())
137 137
138 138 def unfilteredmethod(orig):
139 139 """decorate method that always need to be run on unfiltered version"""
140 140 def wrapper(repo, *args, **kwargs):
141 141 return orig(repo.unfiltered(), *args, **kwargs)
142 142 return wrapper
143 143
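For example, a hypothetical method pinned to the unfiltered repository by the decorator above:

    class examplerepo(localrepository):
        @unfilteredmethod
        def destroying(self):
            # 'self' here is always repo.unfiltered(), never a filtered view
            pass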
144 144 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
145 145 'unbundle'}
146 146 legacycaps = moderncaps.union({'changegroupsubset'})
147 147
148 148 class localpeer(repository.peer):
149 149 '''peer for a local repo; reflects only the most recent API'''
150 150
151 151 def __init__(self, repo, caps=None):
152 152 super(localpeer, self).__init__()
153 153
154 154 if caps is None:
155 155 caps = moderncaps.copy()
156 156 self._repo = repo.filtered('served')
157 157 self._ui = repo.ui
158 158 self._caps = repo._restrictcapabilities(caps)
159 159
160 160 # Begin of _basepeer interface.
161 161
162 162 @util.propertycache
163 163 def ui(self):
164 164 return self._ui
165 165
166 166 def url(self):
167 167 return self._repo.url()
168 168
169 169 def local(self):
170 170 return self._repo
171 171
172 172 def peer(self):
173 173 return self
174 174
175 175 def canpush(self):
176 176 return True
177 177
178 178 def close(self):
179 179 self._repo.close()
180 180
181 181 # End of _basepeer interface.
182 182
183 183 # Begin of _basewirecommands interface.
184 184
185 185 def branchmap(self):
186 186 return self._repo.branchmap()
187 187
188 188 def capabilities(self):
189 189 return self._caps
190 190
191 191 def debugwireargs(self, one, two, three=None, four=None, five=None):
192 192 """Used to test argument passing over the wire"""
193 193 return "%s %s %s %s %s" % (one, two, three, four, five)
194 194
195 195 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
196 196 **kwargs):
197 197 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
198 198 common=common, bundlecaps=bundlecaps,
199 199 **kwargs)[1]
200 200 cb = util.chunkbuffer(chunks)
201 201
202 202 if exchange.bundle2requested(bundlecaps):
203 203 # When requesting a bundle2, getbundle returns a stream to make the
204 204 # wire level function happier. We need to build a proper object
205 205 # from it in local peer.
206 206 return bundle2.getunbundler(self.ui, cb)
207 207 else:
208 208 return changegroup.getunbundler('01', cb, None)
209 209
210 210 def heads(self):
211 211 return self._repo.heads()
212 212
213 213 def known(self, nodes):
214 214 return self._repo.known(nodes)
215 215
216 216 def listkeys(self, namespace):
217 217 return self._repo.listkeys(namespace)
218 218
219 219 def lookup(self, key):
220 220 return self._repo.lookup(key)
221 221
222 222 def pushkey(self, namespace, key, old, new):
223 223 return self._repo.pushkey(namespace, key, old, new)
224 224
225 225 def stream_out(self):
226 226 raise error.Abort(_('cannot perform stream clone against local '
227 227 'peer'))
228 228
229 229 def unbundle(self, cg, heads, url):
230 230 """apply a bundle on a repo
231 231
232 232 This function handles the repo locking itself."""
233 233 try:
234 234 try:
235 235 cg = exchange.readbundle(self.ui, cg, None)
236 236 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
237 237 if util.safehasattr(ret, 'getchunks'):
238 238 # This is a bundle20 object, turn it into an unbundler.
239 239 # This little dance should be dropped eventually when the
240 240 # API is finally improved.
241 241 stream = util.chunkbuffer(ret.getchunks())
242 242 ret = bundle2.getunbundler(self.ui, stream)
243 243 return ret
244 244 except Exception as exc:
245 245 # If the exception contains output salvaged from a bundle2
246 246 # reply, we need to make sure it is printed before continuing
247 247 # to fail. So we build a bundle2 with such output and consume
248 248 # it directly.
249 249 #
250 250 # This is not very elegant but allows a "simple" solution for
251 251 # issue4594
252 252 output = getattr(exc, '_bundle2salvagedoutput', ())
253 253 if output:
254 254 bundler = bundle2.bundle20(self._repo.ui)
255 255 for out in output:
256 256 bundler.addpart(out)
257 257 stream = util.chunkbuffer(bundler.getchunks())
258 258 b = bundle2.getunbundler(self.ui, stream)
259 259 bundle2.processbundle(self._repo, b)
260 260 raise
261 261 except error.PushRaced as exc:
262 262 raise error.ResponseError(_('push failed:'), str(exc))
263 263
264 264 # End of _basewirecommands interface.
265 265
266 266 # Begin of peer interface.
267 267
268 268 def iterbatch(self):
269 269 return peer.localiterbatcher(self)
270 270
271 271 # End of peer interface.
272 272
273 273 class locallegacypeer(repository.legacypeer, localpeer):
274 274 '''peer extension which implements legacy methods too; used for tests with
275 275 restricted capabilities'''
276 276
277 277 def __init__(self, repo):
278 278 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
279 279
280 280 # Begin of baselegacywirecommands interface.
281 281
282 282 def between(self, pairs):
283 283 return self._repo.between(pairs)
284 284
285 285 def branches(self, nodes):
286 286 return self._repo.branches(nodes)
287 287
288 288 def changegroup(self, basenodes, source):
289 289 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
290 290 missingheads=self._repo.heads())
291 291 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
292 292
293 293 def changegroupsubset(self, bases, heads, source):
294 294 outgoing = discovery.outgoing(self._repo, missingroots=bases,
295 295 missingheads=heads)
296 296 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
297 297
298 298 # End of baselegacywirecommands interface.
299 299
300 300 # Increment the sub-version when the revlog v2 format changes to lock out old
301 301 # clients.
302 302 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
303 303
304 304 class localrepository(object):
305 305
306 # obsolete experimental requirements:
307 # - manifestv2: An experimental new manifest format that allowed
308 # for stem compression of long paths. Experiment ended up not
309 # being successful (repository sizes went up due to worse delta
310 # chains), and the code was deleted in 4.6.
306 311 supportedformats = {
307 312 'revlogv1',
308 313 'generaldelta',
309 314 'treemanifest',
310 'manifestv2',
311 315 REVLOGV2_REQUIREMENT,
312 316 }
313 317 _basesupported = supportedformats | {
314 318 'store',
315 319 'fncache',
316 320 'shared',
317 321 'relshared',
318 322 'dotencode',
319 323 'exp-sparse',
320 324 }
321 325 openerreqs = {
322 326 'revlogv1',
323 327 'generaldelta',
324 328 'treemanifest',
325 'manifestv2',
326 329 }
327 330
328 331 # a list of (ui, featureset) functions.
329 332 # only functions defined in modules of enabled extensions are invoked
330 333 featuresetupfuncs = set()
331 334
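A hedged sketch of how an extension would register one of these functions (hypothetical extension module; the set is consulted during `__init__` below):

    from mercurial import localrepo

    def featuresetup(ui, features):
        # advertise an extra requirement this extension knows how to handle
        features.add('exp-myfeature')

    def uisetup(ui):
        localrepo.localrepository.featuresetupfuncs.add(featuresetup)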
332 335 # list of prefixes for files which can be written without 'wlock'
333 336 # Extensions should extend this list when needed
334 337 _wlockfreeprefix = {
335 338 # We might consider requiring 'wlock' for the next
336 339 # two, but pretty much all the existing code assumes
337 340 # wlock is not needed, so we keep them excluded for
338 341 # now.
339 342 'hgrc',
340 343 'requires',
341 344 # XXX cache is a complicated business; someone
342 345 # should investigate this in depth at some point
343 346 'cache/',
344 347 # XXX shouldn't dirstate be covered by the wlock?
345 348 'dirstate',
346 349 # XXX bisect was still a bit too messy at the time
347 350 # this changeset was introduced. Someone should fix
348 351 # the remaining bit and drop this line
349 352 'bisect.state',
350 353 }
351 354
352 355 def __init__(self, baseui, path, create=False):
353 356 self.requirements = set()
354 357 self.filtername = None
355 358 # wvfs: rooted at the repository root, used to access the working copy
356 359 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
357 360 # vfs: rooted at .hg, used to access repo files outside of .hg/store
358 361 self.vfs = None
359 362 # svfs: usually rooted at .hg/store, used to access repository history
360 363 # If this is a shared repository, this vfs may point to another
361 364 # repository's .hg/store directory.
362 365 self.svfs = None
363 366 self.root = self.wvfs.base
364 367 self.path = self.wvfs.join(".hg")
365 368 self.origroot = path
366 369 # This is only used by context.workingctx.match in order to
367 370 # detect files in subrepos.
368 371 self.auditor = pathutil.pathauditor(
369 372 self.root, callback=self._checknested)
370 373 # This is only used by context.basectx.match in order to detect
371 374 # files in subrepos.
372 375 self.nofsauditor = pathutil.pathauditor(
373 376 self.root, callback=self._checknested, realfs=False, cached=True)
374 377 self.baseui = baseui
375 378 self.ui = baseui.copy()
376 379 self.ui.copy = baseui.copy # prevent copying repo configuration
377 380 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
378 381 if (self.ui.configbool('devel', 'all-warnings') or
379 382 self.ui.configbool('devel', 'check-locks')):
380 383 self.vfs.audit = self._getvfsward(self.vfs.audit)
381 384 # A list of callbacks to shape the phase if no data were found.
382 385 # Callbacks are in the form: func(repo, roots) --> processed root.
383 386 # This list is to be filled by extensions during repo setup
384 387 self._phasedefaults = []
385 388 try:
386 389 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
387 390 self._loadextensions()
388 391 except IOError:
389 392 pass
390 393
391 394 if self.featuresetupfuncs:
392 395 self.supported = set(self._basesupported) # use private copy
393 396 extmods = set(m.__name__ for n, m
394 397 in extensions.extensions(self.ui))
395 398 for setupfunc in self.featuresetupfuncs:
396 399 if setupfunc.__module__ in extmods:
397 400 setupfunc(self.ui, self.supported)
398 401 else:
399 402 self.supported = self._basesupported
400 403 color.setup(self.ui)
401 404
402 405 # Add compression engines.
403 406 for name in util.compengines:
404 407 engine = util.compengines[name]
405 408 if engine.revlogheader():
406 409 self.supported.add('exp-compression-%s' % name)
407 410
408 411 if not self.vfs.isdir():
409 412 if create:
410 413 self.requirements = newreporequirements(self)
411 414
412 415 if not self.wvfs.exists():
413 416 self.wvfs.makedirs()
414 417 self.vfs.makedir(notindexed=True)
415 418
416 419 if 'store' in self.requirements:
417 420 self.vfs.mkdir("store")
418 421
419 422 # create an invalid changelog
420 423 self.vfs.append(
421 424 "00changelog.i",
422 425 '\0\0\0\2' # represents revlogv2
423 426 ' dummy changelog to prevent using the old repo layout'
424 427 )
425 428 else:
426 429 raise error.RepoError(_("repository %s not found") % path)
427 430 elif create:
428 431 raise error.RepoError(_("repository %s already exists") % path)
429 432 else:
430 433 try:
431 434 self.requirements = scmutil.readrequires(
432 435 self.vfs, self.supported)
433 436 except IOError as inst:
434 437 if inst.errno != errno.ENOENT:
435 438 raise
436 439
437 440 cachepath = self.vfs.join('cache')
438 441 self.sharedpath = self.path
439 442 try:
440 443 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
441 444 if 'relshared' in self.requirements:
442 445 sharedpath = self.vfs.join(sharedpath)
443 446 vfs = vfsmod.vfs(sharedpath, realpath=True)
444 447 cachepath = vfs.join('cache')
445 448 s = vfs.base
446 449 if not vfs.exists():
447 450 raise error.RepoError(
448 451 _('.hg/sharedpath points to nonexistent directory %s') % s)
449 452 self.sharedpath = s
450 453 except IOError as inst:
451 454 if inst.errno != errno.ENOENT:
452 455 raise
453 456
454 457 if 'exp-sparse' in self.requirements and not sparse.enabled:
455 458 raise error.RepoError(_('repository is using sparse feature but '
456 459 'sparse is not enabled; enable the '
457 460 '"sparse" extensions to access'))
458 461
459 462 self.store = store.store(
460 463 self.requirements, self.sharedpath,
461 464 lambda base: vfsmod.vfs(base, cacheaudited=True))
462 465 self.spath = self.store.path
463 466 self.svfs = self.store.vfs
464 467 self.sjoin = self.store.join
465 468 self.vfs.createmode = self.store.createmode
466 469 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
467 470 self.cachevfs.createmode = self.store.createmode
468 471 if (self.ui.configbool('devel', 'all-warnings') or
469 472 self.ui.configbool('devel', 'check-locks')):
470 473 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
471 474 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
472 475 else: # standard vfs
473 476 self.svfs.audit = self._getsvfsward(self.svfs.audit)
474 477 self._applyopenerreqs()
475 478 if create:
476 479 self._writerequirements()
477 480
478 481 self._dirstatevalidatewarned = False
479 482
480 483 self._branchcaches = {}
481 484 self._revbranchcache = None
482 485 self.filterpats = {}
483 486 self._datafilters = {}
484 487 self._transref = self._lockref = self._wlockref = None
485 488
486 489 # A cache for various files under .hg/ that tracks file changes
487 490 # (used by the filecache decorator)
488 491 #
489 492 # Maps a property name to its util.filecacheentry
490 493 self._filecache = {}
491 494
492 495 # hold sets of revisions to be filtered
493 496 # should be cleared when something might have changed the filter value:
494 497 # - new changesets,
495 498 # - phase change,
496 499 # - new obsolescence marker,
497 500 # - working directory parent change,
498 501 # - bookmark changes
499 502 self.filteredrevcache = {}
500 503
501 504 # post-dirstate-status hooks
502 505 self._postdsstatus = []
503 506
504 507 # generic mapping between names and nodes
505 508 self.names = namespaces.namespaces()
506 509
507 510 # Key to signature value.
508 511 self._sparsesignaturecache = {}
509 512 # Signature to cached matcher instance.
510 513 self._sparsematchercache = {}
511 514
512 515 def _getvfsward(self, origfunc):
513 516 """build a ward for self.vfs"""
514 517 rref = weakref.ref(self)
515 518 def checkvfs(path, mode=None):
516 519 ret = origfunc(path, mode=mode)
517 520 repo = rref()
518 521 if (repo is None
519 522 or not util.safehasattr(repo, '_wlockref')
520 523 or not util.safehasattr(repo, '_lockref')):
521 524 return
522 525 if mode in (None, 'r', 'rb'):
523 526 return
524 527 if path.startswith(repo.path):
525 528 # truncate name relative to the repository (.hg)
526 529 path = path[len(repo.path) + 1:]
527 530 if path.startswith('cache/'):
528 531 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
529 532 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
530 533 if path.startswith('journal.'):
531 534 # journal is covered by 'lock'
532 535 if repo._currentlock(repo._lockref) is None:
533 536 repo.ui.develwarn('write with no lock: "%s"' % path,
534 537 stacklevel=2, config='check-locks')
535 538 elif repo._currentlock(repo._wlockref) is None:
536 539 # rest of vfs files are covered by 'wlock'
537 540 #
538 541 # exclude special files
539 542 for prefix in self._wlockfreeprefix:
540 543 if path.startswith(prefix):
541 544 return
542 545 repo.ui.develwarn('write with no wlock: "%s"' % path,
543 546 stacklevel=2, config='check-locks')
544 547 return ret
545 548 return checkvfs
546 549
547 550 def _getsvfsward(self, origfunc):
548 551 """build a ward for self.svfs"""
549 552 rref = weakref.ref(self)
550 553 def checksvfs(path, mode=None):
551 554 ret = origfunc(path, mode=mode)
552 555 repo = rref()
553 556 if repo is None or not util.safehasattr(repo, '_lockref'):
554 557 return
555 558 if mode in (None, 'r', 'rb'):
556 559 return
557 560 if path.startswith(repo.sharedpath):
558 561 # truncate name relative to the repository (.hg)
559 562 path = path[len(repo.sharedpath) + 1:]
560 563 if repo._currentlock(repo._lockref) is None:
561 564 repo.ui.develwarn('write with no lock: "%s"' % path,
562 565 stacklevel=3)
563 566 return ret
564 567 return checksvfs
565 568
566 569 def close(self):
567 570 self._writecaches()
568 571
569 572 def _loadextensions(self):
570 573 extensions.loadall(self.ui)
571 574
572 575 def _writecaches(self):
573 576 if self._revbranchcache:
574 577 self._revbranchcache.write()
575 578
576 579 def _restrictcapabilities(self, caps):
577 580 if self.ui.configbool('experimental', 'bundle2-advertise'):
578 581 caps = set(caps)
579 582 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
580 583 role='client'))
581 584 caps.add('bundle2=' + urlreq.quote(capsblob))
582 585 return caps
583 586
584 587 def _applyopenerreqs(self):
585 588 self.svfs.options = dict((r, 1) for r in self.requirements
586 589 if r in self.openerreqs)
587 590 # experimental config: format.chunkcachesize
588 591 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
589 592 if chunkcachesize is not None:
590 593 self.svfs.options['chunkcachesize'] = chunkcachesize
591 594 # experimental config: format.maxchainlen
592 595 maxchainlen = self.ui.configint('format', 'maxchainlen')
593 596 if maxchainlen is not None:
594 597 self.svfs.options['maxchainlen'] = maxchainlen
595 598 # experimental config: format.manifestcachesize
596 599 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
597 600 if manifestcachesize is not None:
598 601 self.svfs.options['manifestcachesize'] = manifestcachesize
599 602 # experimental config: format.aggressivemergedeltas
600 603 aggressivemergedeltas = self.ui.configbool('format',
601 604 'aggressivemergedeltas')
602 605 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
603 606 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
604 607 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
605 608 if 0 <= chainspan:
606 609 self.svfs.options['maxdeltachainspan'] = chainspan
607 610 mmapindexthreshold = self.ui.configbytes('experimental',
608 611 'mmapindexthreshold')
609 612 if mmapindexthreshold is not None:
610 613 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
611 614 withsparseread = self.ui.configbool('experimental', 'sparse-read')
612 615 srdensitythres = float(self.ui.config('experimental',
613 616 'sparse-read.density-threshold'))
614 617 srmingapsize = self.ui.configbytes('experimental',
615 618 'sparse-read.min-gap-size')
616 619 self.svfs.options['with-sparse-read'] = withsparseread
617 620 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
618 621 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
619 622
620 623 for r in self.requirements:
621 624 if r.startswith('exp-compression-'):
622 625 self.svfs.options['compengine'] = r[len('exp-compression-'):]
623 626
624 627 # TODO move "revlogv2" to openerreqs once finalized.
625 628 if REVLOGV2_REQUIREMENT in self.requirements:
626 629 self.svfs.options['revlogv2'] = True
627 630
628 631 def _writerequirements(self):
629 632 scmutil.writerequires(self.vfs, self.requirements)
630 633
631 634 def _checknested(self, path):
632 635 """Determine if path is a legal nested repository."""
633 636 if not path.startswith(self.root):
634 637 return False
635 638 subpath = path[len(self.root) + 1:]
636 639 normsubpath = util.pconvert(subpath)
637 640
638 641 # XXX: Checking against the current working copy is wrong in
639 642 # the sense that it can reject things like
640 643 #
641 644 # $ hg cat -r 10 sub/x.txt
642 645 #
643 646 # if sub/ is no longer a subrepository in the working copy
644 647 # parent revision.
645 648 #
646 649 # However, it can of course also allow things that would have
647 650 # been rejected before, such as the above cat command if sub/
648 651 # is a subrepository now, but was a normal directory before.
649 652 # The old path auditor would have rejected it by mistake since it
650 653 # panics when it sees sub/.hg/.
651 654 #
652 655 # All in all, checking against the working copy seems sensible
653 656 # since we want to prevent access to nested repositories on
654 657 # the filesystem *now*.
655 658 ctx = self[None]
656 659 parts = util.splitpath(subpath)
657 660 while parts:
658 661 prefix = '/'.join(parts)
659 662 if prefix in ctx.substate:
660 663 if prefix == normsubpath:
661 664 return True
662 665 else:
663 666 sub = ctx.sub(prefix)
664 667 return sub.checknested(subpath[len(prefix) + 1:])
665 668 else:
666 669 parts.pop()
667 670 return False
668 671
669 672 def peer(self):
670 673 return localpeer(self) # not cached to avoid reference cycle
671 674
672 675 def unfiltered(self):
673 676 """Return unfiltered version of the repository
674 677
675 678 Intended to be overridden by filtered repos.
676 679 return self
677 680
678 681 def filtered(self, name, visibilityexceptions=None):
679 682 """Return a filtered version of a repository"""
680 683 cls = repoview.newtype(self.unfiltered().__class__)
681 684 return cls(self, name, visibilityexceptions)
682 685
683 686 @repofilecache('bookmarks', 'bookmarks.current')
684 687 def _bookmarks(self):
685 688 return bookmarks.bmstore(self)
686 689
687 690 @property
688 691 def _activebookmark(self):
689 692 return self._bookmarks.active
690 693
691 694 # _phasesets depend on changelog. What we need is to call
692 695 # _phasecache.invalidate() if '00changelog.i' was changed, but it
693 696 # can't be easily expressed in the filecache mechanism.
694 697 @storecache('phaseroots', '00changelog.i')
695 698 def _phasecache(self):
696 699 return phases.phasecache(self, self._phasedefaults)
697 700
698 701 @storecache('obsstore')
699 702 def obsstore(self):
700 703 return obsolete.makestore(self.ui, self)
701 704
702 705 @storecache('00changelog.i')
703 706 def changelog(self):
704 707 return changelog.changelog(self.svfs,
705 708 trypending=txnutil.mayhavepending(self.root))
706 709
707 710 def _constructmanifest(self):
708 711 # This is a temporary function while we migrate from manifest to
709 712 # manifestlog. It allows bundlerepo and unionrepo to intercept the
710 713 # manifest creation.
711 714 return manifest.manifestrevlog(self.svfs)
712 715
713 716 @storecache('00manifest.i')
714 717 def manifestlog(self):
715 718 return manifest.manifestlog(self.svfs, self)
716 719
717 720 @repofilecache('dirstate')
718 721 def dirstate(self):
719 722 sparsematchfn = lambda: sparse.matcher(self)
720 723
721 724 return dirstate.dirstate(self.vfs, self.ui, self.root,
722 725 self._dirstatevalidate, sparsematchfn)
723 726
724 727 def _dirstatevalidate(self, node):
725 728 try:
726 729 self.changelog.rev(node)
727 730 return node
728 731 except error.LookupError:
729 732 if not self._dirstatevalidatewarned:
730 733 self._dirstatevalidatewarned = True
731 734 self.ui.warn(_("warning: ignoring unknown"
732 735 " working parent %s!\n") % short(node))
733 736 return nullid
734 737
735 738 def __getitem__(self, changeid):
736 739 if changeid is None:
737 740 return context.workingctx(self)
738 741 if isinstance(changeid, slice):
739 742 # wdirrev isn't contiguous so the slice shouldn't include it
740 743 return [context.changectx(self, i)
741 744 for i in xrange(*changeid.indices(len(self)))
742 745 if i not in self.changelog.filteredrevs]
743 746 try:
744 747 return context.changectx(self, changeid)
745 748 except error.WdirUnsupported:
746 749 return context.workingctx(self)
747 750
748 751 def __contains__(self, changeid):
749 752 """True if the given changeid exists
750 753
751 754 error.LookupError is raised if an ambiguous node is specified.
752 755 """
753 756 try:
754 757 self[changeid]
755 758 return True
756 759 except error.RepoLookupError:
757 760 return False
758 761
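# Illustrative usage (a sketch, assuming an existing repo object):
# indexing and membership accept the usual changeid forms, such as a
# revision number, hex node, tag, or bookmark:
#
#     if 'tip' in repo:
#         ctx = repo['tip']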
759 762 def __nonzero__(self):
760 763 return True
761 764
762 765 __bool__ = __nonzero__
763 766
764 767 def __len__(self):
765 768 # no need to pay the cost of repoview.changelog
766 769 unfi = self.unfiltered()
767 770 return len(unfi.changelog)
768 771
769 772 def __iter__(self):
770 773 return iter(self.changelog)
771 774
772 775 def revs(self, expr, *args):
773 776 '''Find revisions matching a revset.
774 777
775 778 The revset is specified as a string ``expr`` that may contain
776 779 %-formatting to escape certain types. See ``revsetlang.formatspec``.
777 780
778 781 Revset aliases from the configuration are not expanded. To expand
779 782 user aliases, consider calling ``scmutil.revrange()`` or
780 783 ``repo.anyrevs([expr], user=True)``.
781 784
782 785 Returns a revset.abstractsmartset, which is a list-like interface
783 786 that contains integer revisions.
784 787 '''
785 788 expr = revsetlang.formatspec(expr, *args)
786 789 m = revset.match(None, expr)
787 790 return m(self)
788 791
789 792 def set(self, expr, *args):
790 793 '''Find revisions matching a revset and emit changectx instances.
791 794
792 795 This is a convenience wrapper around ``revs()`` that iterates the
793 796 result and is a generator of changectx instances.
794 797
795 798 Revset aliases from the configuration are not expanded. To expand
796 799 user aliases, consider calling ``scmutil.revrange()``.
797 800 '''
798 801 for r in self.revs(expr, *args):
799 802 yield self[r]
800 803
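# Illustrative usage of revs()/set(); the %-formatting escapes values
# per revsetlang.formatspec (e.g. %s for a string, %ld for a list of
# revision numbers):
#
#     revs = repo.revs('branch(%s) and head()', 'default')
#     for ctx in repo.set('%ld and merge()', revs):
#         ...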
801 804 def anyrevs(self, specs, user=False, localalias=None):
802 805 '''Find revisions matching one of the given revsets.
803 806
804 807 Revset aliases from the configuration are not expanded by default. To
805 808 expand user aliases, specify ``user=True``. To provide some local
806 809 definitions overriding user aliases, set ``localalias`` to
807 810 ``{name: definitionstring}``.
808 811 '''
809 812 if user:
810 813 m = revset.matchany(self.ui, specs, repo=self,
811 814 localalias=localalias)
812 815 else:
813 816 m = revset.matchany(None, specs, localalias=localalias)
814 817 return m(self)
815 818
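# Illustrative usage (hypothetical alias name): expand user revset
# aliases while overriding one locally:
#
#     revs = repo.anyrevs(['releases()'], user=True,
#                         localalias={'releases': 'tag() and not tip'})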
816 819 def url(self):
817 820 return 'file:' + self.root
818 821
819 822 def hook(self, name, throw=False, **args):
820 823 """Call a hook, passing this repo instance.
821 824
822 825 This a convenience method to aid invoking hooks. Extensions likely
823 826 won't call this unless they have registered a custom hook or are
824 827 replacing code that is expected to call a hook.
825 828 """
826 829 return hook.hook(self.ui, self, name, throw, **args)
827 830
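# Illustrative usage (hypothetical hook name; 'node' assumed in scope):
# extensions typically fire their own hooks like so, aborting on
# failure when throw=True:
#
#     repo.hook('pretxnmyext', throw=True, node=hex(node))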
828 831 @filteredpropertycache
829 832 def _tagscache(self):
830 833 '''Returns a tagscache object that contains various tags-related
831 834 caches.'''
832 835
833 836 # This simplifies its cache management by having one decorated
834 837 # function (this one) and the rest simply fetch things from it.
835 838 class tagscache(object):
836 839 def __init__(self):
837 840 # These two define the set of tags for this repository. tags
838 841 # maps tag name to node; tagtypes maps tag name to 'global' or
839 842 # 'local'. (Global tags are defined by .hgtags across all
840 843 # heads, and local tags are defined in .hg/localtags.)
841 844 # They constitute the in-memory cache of tags.
842 845 self.tags = self.tagtypes = None
843 846
844 847 self.nodetagscache = self.tagslist = None
845 848
846 849 cache = tagscache()
847 850 cache.tags, cache.tagtypes = self._findtags()
848 851
849 852 return cache
850 853
851 854 def tags(self):
852 855 '''return a mapping of tag to node'''
853 856 t = {}
854 857 if self.changelog.filteredrevs:
855 858 tags, tt = self._findtags()
856 859 else:
857 860 tags = self._tagscache.tags
858 861 for k, v in tags.iteritems():
859 862 try:
860 863 # ignore tags to unknown nodes
861 864 self.changelog.rev(v)
862 865 t[k] = v
863 866 except (error.LookupError, ValueError):
864 867 pass
865 868 return t
866 869
867 870 def _findtags(self):
868 871 '''Do the hard work of finding tags. Return a pair of dicts
869 872 (tags, tagtypes) where tags maps tag name to node, and tagtypes
870 873 maps tag name to a string like \'global\' or \'local\'.
871 874 Subclasses or extensions are free to add their own tags, but
872 875 should be aware that the returned dicts will be retained for the
873 876 duration of the localrepo object.'''
874 877
875 878 # XXX what tagtype should subclasses/extensions use? Currently
876 879 # mq and bookmarks add tags, but do not set the tagtype at all.
877 880 # Should each extension invent its own tag type? Should there
878 881 # be one tagtype for all such "virtual" tags? Or is the status
879 882 # quo fine?
880 883
881 884
882 885 # map tag name to (node, hist)
883 886 alltags = tagsmod.findglobaltags(self.ui, self)
884 887 # map tag name to tag type
885 888 tagtypes = dict((tag, 'global') for tag in alltags)
886 889
887 890 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
888 891
889 892 # Build the return dicts. Have to re-encode tag names because
890 893 # the tags module always uses UTF-8 (in order not to lose info
891 894 # writing to the cache), but the rest of Mercurial wants them in
892 895 # local encoding.
893 896 tags = {}
894 897 for (name, (node, hist)) in alltags.iteritems():
895 898 if node != nullid:
896 899 tags[encoding.tolocal(name)] = node
897 900 tags['tip'] = self.changelog.tip()
898 901 tagtypes = dict([(encoding.tolocal(name), value)
899 902 for (name, value) in tagtypes.iteritems()])
900 903 return (tags, tagtypes)
901 904
902 905 def tagtype(self, tagname):
903 906 '''
904 907 return the type of the given tag. result can be:
905 908
906 909 'local' : a local tag
907 910 'global' : a global tag
908 911 None : tag does not exist
909 912 '''
910 913
911 914 return self._tagscache.tagtypes.get(tagname)
912 915
913 916 def tagslist(self):
914 917 '''return a list of tags ordered by revision'''
915 918 if not self._tagscache.tagslist:
916 919 l = []
917 920 for t, n in self.tags().iteritems():
918 921 l.append((self.changelog.rev(n), t, n))
919 922 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
920 923
921 924 return self._tagscache.tagslist
922 925
923 926 def nodetags(self, node):
924 927 '''return the tags associated with a node'''
925 928 if not self._tagscache.nodetagscache:
926 929 nodetagscache = {}
927 930 for t, n in self._tagscache.tags.iteritems():
928 931 nodetagscache.setdefault(n, []).append(t)
929 932 for tags in nodetagscache.itervalues():
930 933 tags.sort()
931 934 self._tagscache.nodetagscache = nodetagscache
932 935 return self._tagscache.nodetagscache.get(node, [])
933 936
934 937 def nodebookmarks(self, node):
935 938 """return the list of bookmarks pointing to the specified node"""
936 939 marks = []
937 940 for bookmark, n in self._bookmarks.iteritems():
938 941 if n == node:
939 942 marks.append(bookmark)
940 943 return sorted(marks)
941 944
942 945 def branchmap(self):
943 946 '''returns a dictionary {branch: [branchheads]} with branchheads
944 947 ordered by increasing revision number'''
945 948 branchmap.updatecache(self)
946 949 return self._branchcaches[self.filtername]
947 950
948 951 @unfilteredmethod
949 952 def revbranchcache(self):
950 953 if not self._revbranchcache:
951 954 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
952 955 return self._revbranchcache
953 956
954 957 def branchtip(self, branch, ignoremissing=False):
955 958 '''return the tip node for a given branch
956 959
957 960 If ignoremissing is True, then this method will not raise an error.
958 961 This is helpful for callers that only expect None for a missing branch
959 962 (e.g. namespace).
960 963
961 964 '''
962 965 try:
963 966 return self.branchmap().branchtip(branch)
964 967 except KeyError:
965 968 if not ignoremissing:
966 969 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
967 970 else:
968 971 pass
969 972
970 973 def lookup(self, key):
971 974 return self[key].node()
972 975
973 976 def lookupbranch(self, key, remote=None):
974 977 repo = remote or self
975 978 if key in repo.branchmap():
976 979 return key
977 980
978 981 repo = (remote and remote.local()) and remote or self
979 982 return repo[key].branch()
980 983
981 984 def known(self, nodes):
982 985 cl = self.changelog
983 986 nm = cl.nodemap
984 987 filtered = cl.filteredrevs
985 988 result = []
986 989 for n in nodes:
987 990 r = nm.get(n)
988 991 resp = not (r is None or r in filtered)
989 992 result.append(resp)
990 993 return result
991 994
992 995 def local(self):
993 996 return self
994 997
995 998 def publishing(self):
996 999 # it's safe (and desirable) to trust the publish flag unconditionally
997 1000 # so that we don't finalize changes shared between users via ssh or nfs
998 1001 return self.ui.configbool('phases', 'publish', untrusted=True)
999 1002
1000 1003 def cancopy(self):
1001 1004 # so statichttprepo's override of local() works
1002 1005 if not self.local():
1003 1006 return False
1004 1007 if not self.publishing():
1005 1008 return True
1006 1009 # if publishing we can't copy if there is filtered content
1007 1010 return not self.filtered('visible').changelog.filteredrevs
1008 1011
1009 1012 def shared(self):
1010 1013 '''the type of shared repository (None if not shared)'''
1011 1014 if self.sharedpath != self.path:
1012 1015 return 'store'
1013 1016 return None
1014 1017
1015 1018 def wjoin(self, f, *insidef):
1016 1019 return self.vfs.reljoin(self.root, f, *insidef)
1017 1020
1018 1021 def file(self, f):
1019 1022 if f[0] == '/':
1020 1023 f = f[1:]
1021 1024 return filelog.filelog(self.svfs, f)
1022 1025
1023 1026 def changectx(self, changeid):
1024 1027 return self[changeid]
1025 1028
1026 1029 def setparents(self, p1, p2=nullid):
1027 1030 with self.dirstate.parentchange():
1028 1031 copies = self.dirstate.setparents(p1, p2)
1029 1032 pctx = self[p1]
1030 1033 if copies:
1031 1034 # Adjust copy records, the dirstate cannot do it, it
1032 1035 # requires access to parents manifests. Preserve them
1033 1036 # only for entries added to first parent.
1034 1037 for f in copies:
1035 1038 if f not in pctx and copies[f] in pctx:
1036 1039 self.dirstate.copy(copies[f], f)
1037 1040 if p2 == nullid:
1038 1041 for f, s in sorted(self.dirstate.copies().items()):
1039 1042 if f not in pctx and s not in pctx:
1040 1043 self.dirstate.copy(None, f)
1041 1044
1042 1045 def filectx(self, path, changeid=None, fileid=None):
1043 1046 """changeid can be a changeset revision, node, or tag.
1044 1047 fileid can be a file revision or node."""
1045 1048 return context.filectx(self, path, changeid, fileid)
1046 1049
1047 1050 def getcwd(self):
1048 1051 return self.dirstate.getcwd()
1049 1052
1050 1053 def pathto(self, f, cwd=None):
1051 1054 return self.dirstate.pathto(f, cwd)
1052 1055
1053 1056 def _loadfilter(self, filter):
1054 1057 if filter not in self.filterpats:
1055 1058 l = []
1056 1059 for pat, cmd in self.ui.configitems(filter):
1057 1060 if cmd == '!':
1058 1061 continue
1059 1062 mf = matchmod.match(self.root, '', [pat])
1060 1063 fn = None
1061 1064 params = cmd
1062 1065 for name, filterfn in self._datafilters.iteritems():
1063 1066 if cmd.startswith(name):
1064 1067 fn = filterfn
1065 1068 params = cmd[len(name):].lstrip()
1066 1069 break
1067 1070 if not fn:
1068 1071 fn = lambda s, c, **kwargs: util.filter(s, c)
1069 1072 # Wrap old filters not supporting keyword arguments
1070 1073 if not pycompat.getargspec(fn)[2]:
1071 1074 oldfn = fn
1072 1075 fn = lambda s, c, **kwargs: oldfn(s, c)
1073 1076 l.append((mf, fn, params))
1074 1077 self.filterpats[filter] = l
1075 1078 return self.filterpats[filter]
1076 1079
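# Illustrative configuration driving these filters (the filter commands
# are examples only); see the [encode]/[decode] sections documented in
# 'hg help config':
#
#     [encode]
#     *.txt = dos2unix
#
#     [decode]
#     *.txt = unix2dos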
1077 1080 def _filter(self, filterpats, filename, data):
1078 1081 for mf, fn, cmd in filterpats:
1079 1082 if mf(filename):
1080 1083 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1081 1084 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1082 1085 break
1083 1086
1084 1087 return data
1085 1088
1086 1089 @unfilteredpropertycache
1087 1090 def _encodefilterpats(self):
1088 1091 return self._loadfilter('encode')
1089 1092
1090 1093 @unfilteredpropertycache
1091 1094 def _decodefilterpats(self):
1092 1095 return self._loadfilter('decode')
1093 1096
1094 1097 def adddatafilter(self, name, filter):
1095 1098 self._datafilters[name] = filter
1096 1099
1097 1100 def wread(self, filename):
1098 1101 if self.wvfs.islink(filename):
1099 1102 data = self.wvfs.readlink(filename)
1100 1103 else:
1101 1104 data = self.wvfs.read(filename)
1102 1105 return self._filter(self._encodefilterpats, filename, data)
1103 1106
1104 1107 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1105 1108 """write ``data`` into ``filename`` in the working directory
1106 1109
1107 1110 This returns the length of the written (maybe decoded) data.
1108 1111 """
1109 1112 data = self._filter(self._decodefilterpats, filename, data)
1110 1113 if 'l' in flags:
1111 1114 self.wvfs.symlink(data, filename)
1112 1115 else:
1113 1116 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1114 1117 **kwargs)
1115 1118 if 'x' in flags:
1116 1119 self.wvfs.setflags(filename, False, True)
1117 1120 else:
1118 1121 self.wvfs.setflags(filename, False, False)
1119 1122 return len(data)
1120 1123
1121 1124 def wwritedata(self, filename, data):
1122 1125 return self._filter(self._decodefilterpats, filename, data)
1123 1126
1124 1127 def currenttransaction(self):
1125 1128 """return the current transaction or None if non exists"""
1126 1129 if self._transref:
1127 1130 tr = self._transref()
1128 1131 else:
1129 1132 tr = None
1130 1133
1131 1134 if tr and tr.running():
1132 1135 return tr
1133 1136 return None
1134 1137
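# Illustrative usage (a sketch): join a transaction that is already
# running, or open a new one (nesting is otherwise handled by
# transaction() itself):
#
#     tr = repo.currenttransaction() or repo.transaction('my-operation')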
1135 1138 def transaction(self, desc, report=None):
1136 1139 if (self.ui.configbool('devel', 'all-warnings')
1137 1140 or self.ui.configbool('devel', 'check-locks')):
1138 1141 if self._currentlock(self._lockref) is None:
1139 1142 raise error.ProgrammingError('transaction requires locking')
1140 1143 tr = self.currenttransaction()
1141 1144 if tr is not None:
1142 1145 return tr.nest()
1143 1146
1144 1147 # abort here if the journal already exists
1145 1148 if self.svfs.exists("journal"):
1146 1149 raise error.RepoError(
1147 1150 _("abandoned transaction found"),
1148 1151 hint=_("run 'hg recover' to clean up transaction"))
1149 1152
1150 1153 idbase = "%.40f#%f" % (random.random(), time.time())
1151 1154 ha = hex(hashlib.sha1(idbase).digest())
1152 1155 txnid = 'TXN:' + ha
1153 1156 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1154 1157
1155 1158 self._writejournal(desc)
1156 1159 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1157 1160 if report:
1158 1161 rp = report
1159 1162 else:
1160 1163 rp = self.ui.warn
1161 1164 vfsmap = {'plain': self.vfs} # root of .hg/
1162 1165 # we must avoid cyclic reference between repo and transaction.
1163 1166 reporef = weakref.ref(self)
1164 1167 # Code to track tag movement
1165 1168 #
1166 1169 # Since tags are all handled as file content, it is actually quite hard
1167 1170 # to track these movements from a code perspective. So we fall back to
1168 1171 # tracking at the repository level. One could envision tracking changes
1169 1172 # to the '.hgtags' file through changegroup apply but that fails to
1170 1173 # cope with cases where a transaction exposes new heads without a
1171 1174 # changegroup being involved (eg: phase movement).
1172 1175 #
1173 1176 # For now, we gate the feature behind a flag since this likely comes
1174 1177 # with performance impacts. The current code runs more often than needed
1175 1178 # and does not use caches as much as it could. The current focus is on
1176 1179 # the behavior of the feature so we disable it by default. The flag
1177 1180 # will be removed when we are happy with the performance impact.
1178 1181 #
1179 1182 # Once this feature is no longer experimental move the following
1180 1183 # documentation to the appropriate help section:
1181 1184 #
1182 1185 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1183 1186 # tags (new or changed or deleted tags). In addition the details of
1184 1187 # these changes are made available in a file at:
1185 1188 # ``REPOROOT/.hg/changes/tags.changes``.
1186 1189 # Make sure you check for HG_TAG_MOVED before reading that file as it
1187 1190 # might exist from a previous transaction even if no tags were touched
1188 1191 # in this one. Changes are recorded in a line-based format::
1189 1192 #
1190 1193 # <action> <hex-node> <tag-name>\n
1191 1194 #
1192 1195 # Actions are defined as follows:
1193 1196 # "-R": tag is removed,
1194 1197 # "+A": tag is added,
1195 1198 # "-M": tag is moved (old value),
1196 1199 # "+M": tag is moved (new value),
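# For instance, moving an existing tag would be recorded as two lines
# such as (the nodes below are placeholders, not real hashes):
#
#   -M 0123456789abcdef0123456789abcdef01234567 my-tag
#   +M fedcba9876543210fedcba9876543210fedcba98 my-tag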
1197 1200 tracktags = lambda x: None
1198 1201 # experimental config: experimental.hook-track-tags
1199 1202 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1200 1203 if desc != 'strip' and shouldtracktags:
1201 1204 oldheads = self.changelog.headrevs()
1202 1205 def tracktags(tr2):
1203 1206 repo = reporef()
1204 1207 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1205 1208 newheads = repo.changelog.headrevs()
1206 1209 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1207 1210 # notes: we compare lists here.
1208 1211 # As we do it only once, building a set would not be cheaper
1209 1212 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1210 1213 if changes:
1211 1214 tr2.hookargs['tag_moved'] = '1'
1212 1215 with repo.vfs('changes/tags.changes', 'w',
1213 1216 atomictemp=True) as changesfile:
1214 1217 # note: we do not register the file with the transaction
1215 1218 # because we need it to still exist when the transaction
1216 1219 # is closed (for txnclose hooks)
1217 1220 tagsmod.writediff(changesfile, changes)
1218 1221 def validate(tr2):
1219 1222 """will run pre-closing hooks"""
1220 1223 # XXX the transaction API is a bit lacking here so we take a hacky
1221 1224 # path for now
1222 1225 #
1223 1226 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1224 1227 # dict is copied before these run. In addition we need the data
1225 1228 # available to in-memory hooks too.
1226 1229 #
1227 1230 # Moreover, we also need to make sure this runs before txnclose
1228 1231 # hooks and there is no "pending" mechanism that would execute
1229 1232 # logic only if hooks are about to run.
1230 1233 #
1231 1234 # Fixing this limitation of the transaction is also needed to track
1232 1235 # other families of changes (bookmarks, phases, obsolescence).
1233 1236 #
1234 1237 # This will have to be fixed before we remove the experimental
1235 1238 # gating.
1236 1239 tracktags(tr2)
1237 1240 repo = reporef()
1238 1241 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1239 1242 scmutil.enforcesinglehead(repo, tr2, desc)
1240 1243 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1241 1244 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1242 1245 args = tr.hookargs.copy()
1243 1246 args.update(bookmarks.preparehookargs(name, old, new))
1244 1247 repo.hook('pretxnclose-bookmark', throw=True,
1245 1248 txnname=desc,
1246 1249 **pycompat.strkwargs(args))
1247 1250 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1248 1251 cl = repo.unfiltered().changelog
1249 1252 for rev, (old, new) in tr.changes['phases'].items():
1250 1253 args = tr.hookargs.copy()
1251 1254 node = hex(cl.node(rev))
1252 1255 args.update(phases.preparehookargs(node, old, new))
1253 1256 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1254 1257 **pycompat.strkwargs(args))
1255 1258
1256 1259 repo.hook('pretxnclose', throw=True,
1257 1260 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1258 1261 def releasefn(tr, success):
1259 1262 repo = reporef()
1260 1263 if success:
1261 1264 # this should be explicitly invoked here, because
1262 1265 # in-memory changes aren't written out when closing the
1263 1266 # transaction, if tr.addfilegenerator (via
1264 1267 # dirstate.write or so) isn't invoked while the
1265 1268 # transaction is running
1266 1269 repo.dirstate.write(None)
1267 1270 else:
1268 1271 # discard all changes (including ones already written
1269 1272 # out) in this transaction
1270 1273 repo.dirstate.restorebackup(None, 'journal.dirstate')
1271 1274
1272 1275 repo.invalidate(clearfilecache=True)
1273 1276
1274 1277 tr = transaction.transaction(rp, self.svfs, vfsmap,
1275 1278 "journal",
1276 1279 "undo",
1277 1280 aftertrans(renames),
1278 1281 self.store.createmode,
1279 1282 validator=validate,
1280 1283 releasefn=releasefn,
1281 1284 checkambigfiles=_cachedfiles)
1282 1285 tr.changes['revs'] = xrange(0, 0)
1283 1286 tr.changes['obsmarkers'] = set()
1284 1287 tr.changes['phases'] = {}
1285 1288 tr.changes['bookmarks'] = {}
1286 1289
1287 1290 tr.hookargs['txnid'] = txnid
1288 1291 # note: writing the fncache only during finalize means that the file is
1289 1292 # outdated when running hooks. As fncache is used for streaming clones,
1290 1293 # this is not expected to break anything that happens during the hooks.
1291 1294 tr.addfinalize('flush-fncache', self.store.write)
1292 1295 def txnclosehook(tr2):
1293 1296 """To be run if transaction is successful, will schedule a hook run
1294 1297 """
1295 1298 # Don't reference tr2 in hook() so we don't hold a reference.
1296 1299 # This reduces memory consumption when there are multiple
1297 1300 # transactions per lock. This can likely go away if issue5045
1298 1301 # fixes the function accumulation.
1299 1302 hookargs = tr2.hookargs
1300 1303
1301 1304 def hookfunc():
1302 1305 repo = reporef()
1303 1306 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1304 1307 bmchanges = sorted(tr.changes['bookmarks'].items())
1305 1308 for name, (old, new) in bmchanges:
1306 1309 args = tr.hookargs.copy()
1307 1310 args.update(bookmarks.preparehookargs(name, old, new))
1308 1311 repo.hook('txnclose-bookmark', throw=False,
1309 1312 txnname=desc, **pycompat.strkwargs(args))
1310 1313
1311 1314 if hook.hashook(repo.ui, 'txnclose-phase'):
1312 1315 cl = repo.unfiltered().changelog
1313 1316 phasemv = sorted(tr.changes['phases'].items())
1314 1317 for rev, (old, new) in phasemv:
1315 1318 args = tr.hookargs.copy()
1316 1319 node = hex(cl.node(rev))
1317 1320 args.update(phases.preparehookargs(node, old, new))
1318 1321 repo.hook('txnclose-phase', throw=False, txnname=desc,
1319 1322 **pycompat.strkwargs(args))
1320 1323
1321 1324 repo.hook('txnclose', throw=False, txnname=desc,
1322 1325 **pycompat.strkwargs(hookargs))
1323 1326 reporef()._afterlock(hookfunc)
1324 1327 tr.addfinalize('txnclose-hook', txnclosehook)
1325 1328 # Include a leading "-" to make it happen before the transaction summary
1326 1329 # reports registered via scmutil.registersummarycallback() whose names
1327 1330 # are 00-txnreport etc. That way, the caches will be warm when the
1328 1331 # callbacks run.
1329 1332 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1330 1333 def txnaborthook(tr2):
1331 1334 """To be run if transaction is aborted
1332 1335 """
1333 1336 reporef().hook('txnabort', throw=False, txnname=desc,
1334 1337 **pycompat.strkwargs(tr2.hookargs))
1335 1338 tr.addabort('txnabort-hook', txnaborthook)
1336 1339 # avoid eager cache invalidation. in-memory data should be identical
1337 1340 # to stored data if transaction has no error.
1338 1341 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1339 1342 self._transref = weakref.ref(tr)
1340 1343 scmutil.registersummarycallback(self, tr, desc)
1341 1344 return tr
1342 1345
1343 1346 def _journalfiles(self):
1344 1347 return ((self.svfs, 'journal'),
1345 1348 (self.vfs, 'journal.dirstate'),
1346 1349 (self.vfs, 'journal.branch'),
1347 1350 (self.vfs, 'journal.desc'),
1348 1351 (self.vfs, 'journal.bookmarks'),
1349 1352 (self.svfs, 'journal.phaseroots'))
1350 1353
1351 1354 def undofiles(self):
1352 1355 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1353 1356
1354 1357 @unfilteredmethod
1355 1358 def _writejournal(self, desc):
1356 1359 self.dirstate.savebackup(None, 'journal.dirstate')
1357 1360 self.vfs.write("journal.branch",
1358 1361 encoding.fromlocal(self.dirstate.branch()))
1359 1362 self.vfs.write("journal.desc",
1360 1363 "%d\n%s\n" % (len(self), desc))
1361 1364 self.vfs.write("journal.bookmarks",
1362 1365 self.vfs.tryread("bookmarks"))
1363 1366 self.svfs.write("journal.phaseroots",
1364 1367 self.svfs.tryread("phaseroots"))
1365 1368
1366 1369 def recover(self):
1367 1370 with self.lock():
1368 1371 if self.svfs.exists("journal"):
1369 1372 self.ui.status(_("rolling back interrupted transaction\n"))
1370 1373 vfsmap = {'': self.svfs,
1371 1374 'plain': self.vfs,}
1372 1375 transaction.rollback(self.svfs, vfsmap, "journal",
1373 1376 self.ui.warn,
1374 1377 checkambigfiles=_cachedfiles)
1375 1378 self.invalidate()
1376 1379 return True
1377 1380 else:
1378 1381 self.ui.warn(_("no interrupted transaction available\n"))
1379 1382 return False
1380 1383
1381 1384 def rollback(self, dryrun=False, force=False):
1382 1385 wlock = lock = dsguard = None
1383 1386 try:
1384 1387 wlock = self.wlock()
1385 1388 lock = self.lock()
1386 1389 if self.svfs.exists("undo"):
1387 1390 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1388 1391
1389 1392 return self._rollback(dryrun, force, dsguard)
1390 1393 else:
1391 1394 self.ui.warn(_("no rollback information available\n"))
1392 1395 return 1
1393 1396 finally:
1394 1397 release(dsguard, lock, wlock)
1395 1398
1396 1399 @unfilteredmethod # Until we get smarter cache management
1397 1400 def _rollback(self, dryrun, force, dsguard):
1398 1401 ui = self.ui
1399 1402 try:
1400 1403 args = self.vfs.read('undo.desc').splitlines()
1401 1404 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1402 1405 if len(args) >= 3:
1403 1406 detail = args[2]
1404 1407 oldtip = oldlen - 1
1405 1408
1406 1409 if detail and ui.verbose:
1407 1410 msg = (_('repository tip rolled back to revision %d'
1408 1411 ' (undo %s: %s)\n')
1409 1412 % (oldtip, desc, detail))
1410 1413 else:
1411 1414 msg = (_('repository tip rolled back to revision %d'
1412 1415 ' (undo %s)\n')
1413 1416 % (oldtip, desc))
1414 1417 except IOError:
1415 1418 msg = _('rolling back unknown transaction\n')
1416 1419 desc = None
1417 1420
1418 1421 if not force and self['.'] != self['tip'] and desc == 'commit':
1419 1422 raise error.Abort(
1420 1423 _('rollback of last commit while not checked out '
1421 1424 'may lose data'), hint=_('use -f to force'))
1422 1425
1423 1426 ui.status(msg)
1424 1427 if dryrun:
1425 1428 return 0
1426 1429
1427 1430 parents = self.dirstate.parents()
1428 1431 self.destroying()
1429 1432 vfsmap = {'plain': self.vfs, '': self.svfs}
1430 1433 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1431 1434 checkambigfiles=_cachedfiles)
1432 1435 if self.vfs.exists('undo.bookmarks'):
1433 1436 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1434 1437 if self.svfs.exists('undo.phaseroots'):
1435 1438 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1436 1439 self.invalidate()
1437 1440
1438 1441 parentgone = (parents[0] not in self.changelog.nodemap or
1439 1442 parents[1] not in self.changelog.nodemap)
1440 1443 if parentgone:
1441 1444 # prevent dirstateguard from overwriting already restored one
1442 1445 dsguard.close()
1443 1446
1444 1447 self.dirstate.restorebackup(None, 'undo.dirstate')
1445 1448 try:
1446 1449 branch = self.vfs.read('undo.branch')
1447 1450 self.dirstate.setbranch(encoding.tolocal(branch))
1448 1451 except IOError:
1449 1452 ui.warn(_('named branch could not be reset: '
1450 1453 'current branch is still \'%s\'\n')
1451 1454 % self.dirstate.branch())
1452 1455
1453 1456 parents = tuple([p.rev() for p in self[None].parents()])
1454 1457 if len(parents) > 1:
1455 1458 ui.status(_('working directory now based on '
1456 1459 'revisions %d and %d\n') % parents)
1457 1460 else:
1458 1461 ui.status(_('working directory now based on '
1459 1462 'revision %d\n') % parents)
1460 1463 mergemod.mergestate.clean(self, self['.'].node())
1461 1464
1462 1465 # TODO: if we know which new heads may result from this rollback, pass
1463 1466 # them to destroy(), which will prevent the branchhead cache from being
1464 1467 # invalidated.
1465 1468 self.destroyed()
1466 1469 return 0
1467 1470
1468 1471 def _buildcacheupdater(self, newtransaction):
1469 1472 """called during transaction to build the callback updating cache
1470 1473
1471 1474 Lives on the repository to help extension who might want to augment
1472 1475 this logic. For this purpose, the created transaction is passed to the
1473 1476 method.
1474 1477 """
1475 1478 # we must avoid cyclic reference between repo and transaction.
1476 1479 reporef = weakref.ref(self)
1477 1480 def updater(tr):
1478 1481 repo = reporef()
1479 1482 repo.updatecaches(tr)
1480 1483 return updater
1481 1484
1482 1485 @unfilteredmethod
1483 1486 def updatecaches(self, tr=None):
1484 1487 """warm appropriate caches
1485 1488
1486 1489 If this function is called after a transaction has closed, the transaction
1487 1490 will be available in the 'tr' argument. This can be used to selectively
1488 1491 update caches relevant to the changes in that transaction.
1489 1492 """
1490 1493 if tr is not None and tr.hookargs.get('source') == 'strip':
1491 1494 # During strip, many caches are invalid but
1492 1495 # later call to `destroyed` will refresh them.
1493 1496 return
1494 1497
1495 1498 if tr is None or tr.changes['revs']:
1496 1499 # updating the unfiltered branchmap should refresh all the others,
1497 1500 self.ui.debug('updating the branch cache\n')
1498 1501 branchmap.updatecache(self.filtered('served'))
1499 1502
1500 1503 def invalidatecaches(self):
1501 1504
1502 1505 if '_tagscache' in vars(self):
1503 1506 # can't use delattr on proxy
1504 1507 del self.__dict__['_tagscache']
1505 1508
1506 1509 self.unfiltered()._branchcaches.clear()
1507 1510 self.invalidatevolatilesets()
1508 1511 self._sparsesignaturecache.clear()
1509 1512
1510 1513 def invalidatevolatilesets(self):
1511 1514 self.filteredrevcache.clear()
1512 1515 obsolete.clearobscaches(self)
1513 1516
1514 1517 def invalidatedirstate(self):
1515 1518 '''Invalidates the dirstate, causing the next call to dirstate
1516 1519 to check if it has been modified since the last time it was read,
1517 1520 rereading it if it has.
1518 1521
1519 1522 This is different from dirstate.invalidate() in that it doesn't always
1520 1523 reread the dirstate. Use dirstate.invalidate() if you want to
1521 1524 explicitly read the dirstate again (i.e. restoring it to a previous
1522 1525 known good state).'''
1523 1526 if hasunfilteredcache(self, 'dirstate'):
1524 1527 for k in self.dirstate._filecache:
1525 1528 try:
1526 1529 delattr(self.dirstate, k)
1527 1530 except AttributeError:
1528 1531 pass
1529 1532 delattr(self.unfiltered(), 'dirstate')
1530 1533
1531 1534 def invalidate(self, clearfilecache=False):
1532 1535 '''Invalidates both store and non-store parts other than dirstate
1533 1536
1534 1537 If a transaction is running, invalidation of store is omitted,
1535 1538 because discarding in-memory changes might cause inconsistency
1536 1539 (e.g. an incomplete fncache causes unintentional failure, but
1537 1540 a redundant one doesn't).
1538 1541 '''
1539 1542 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1540 1543 for k in list(self._filecache.keys()):
1541 1544 # dirstate is invalidated separately in invalidatedirstate()
1542 1545 if k == 'dirstate':
1543 1546 continue
1544 1547 if (k == 'changelog' and
1545 1548 self.currenttransaction() and
1546 1549 self.changelog._delayed):
1547 1550 # The changelog object may store unwritten revisions. We don't
1548 1551 # want to lose them.
1549 1552 # TODO: Solve the problem instead of working around it.
1550 1553 continue
1551 1554
1552 1555 if clearfilecache:
1553 1556 del self._filecache[k]
1554 1557 try:
1555 1558 delattr(unfiltered, k)
1556 1559 except AttributeError:
1557 1560 pass
1558 1561 self.invalidatecaches()
1559 1562 if not self.currenttransaction():
1560 1563 # TODO: Changing contents of store outside transaction
1561 1564 # causes inconsistency. We should make in-memory store
1562 1565 # changes detectable, and abort if changed.
1563 1566 self.store.invalidatecaches()
1564 1567
1565 1568 def invalidateall(self):
1566 1569 '''Fully invalidates both store and non-store parts, causing the
1567 1570 subsequent operation to reread any outside changes.'''
1568 1571 # extensions should hook this to invalidate their caches
1569 1572 self.invalidate()
1570 1573 self.invalidatedirstate()
1571 1574
1572 1575 @unfilteredmethod
1573 1576 def _refreshfilecachestats(self, tr):
1574 1577 """Reload stats of cached files so that they are flagged as valid"""
1575 1578 for k, ce in self._filecache.items():
1576 1579 k = pycompat.sysstr(k)
1577 1580 if k == r'dirstate' or k not in self.__dict__:
1578 1581 continue
1579 1582 ce.refresh()
1580 1583
1581 1584 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1582 1585 inheritchecker=None, parentenvvar=None):
1583 1586 parentlock = None
1584 1587 # the contents of parentenvvar are used by the underlying lock to
1585 1588 # determine whether it can be inherited
1586 1589 if parentenvvar is not None:
1587 1590 parentlock = encoding.environ.get(parentenvvar)
1588 1591
1589 1592 timeout = 0
1590 1593 warntimeout = 0
1591 1594 if wait:
1592 1595 timeout = self.ui.configint("ui", "timeout")
1593 1596 warntimeout = self.ui.configint("ui", "timeout.warn")
1594 1597
1595 1598 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1596 1599 releasefn=releasefn,
1597 1600 acquirefn=acquirefn, desc=desc,
1598 1601 inheritchecker=inheritchecker,
1599 1602 parentlock=parentlock)
1600 1603 return l
1601 1604
1602 1605 def _afterlock(self, callback):
1603 1606 """add a callback to be run when the repository is fully unlocked
1604 1607
1605 1608 The callback will be executed when the outermost lock is released
1606 1609 (with wlock being higher level than 'lock')."""
1607 1610 for ref in (self._wlockref, self._lockref):
1608 1611 l = ref and ref()
1609 1612 if l and l.held:
1610 1613 l.postrelease.append(callback)
1611 1614 break
1612 1615 else: # no lock has been found.
1613 1616 callback()
1614 1617
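# Illustrative usage (hypothetical hook name): defer work until every
# lock is released:
#
#     repo._afterlock(lambda: repo.hook('myext-unlocked', throw=False))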
1615 1618 def lock(self, wait=True):
1616 1619 '''Lock the repository store (.hg/store) and return a weak reference
1617 1620 to the lock. Use this before modifying the store (e.g. committing or
1618 1621 stripping). If you are opening a transaction, get a lock as well.
1619 1622
1620 1623 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1621 1624 'wlock' first to avoid a dead-lock hazard.'''
1622 1625 l = self._currentlock(self._lockref)
1623 1626 if l is not None:
1624 1627 l.lock()
1625 1628 return l
1626 1629
1627 1630 l = self._lock(self.svfs, "lock", wait, None,
1628 1631 self.invalidate, _('repository %s') % self.origroot)
1629 1632 self._lockref = weakref.ref(l)
1630 1633 return l
1631 1634
1632 1635 def _wlockchecktransaction(self):
1633 1636 if self.currenttransaction() is not None:
1634 1637 raise error.LockInheritanceContractViolation(
1635 1638 'wlock cannot be inherited in the middle of a transaction')
1636 1639
1637 1640 def wlock(self, wait=True):
1638 1641 '''Lock the non-store parts of the repository (everything under
1639 1642 .hg except .hg/store) and return a weak reference to the lock.
1640 1643
1641 1644 Use this before modifying files in .hg.
1642 1645
1643 1646 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1644 1647 'wlock' first to avoid a dead-lock hazard.'''
1645 1648 l = self._wlockref and self._wlockref()
1646 1649 if l is not None and l.held:
1647 1650 l.lock()
1648 1651 return l
1649 1652
1650 1653 # We do not need to check for non-waiting lock acquisition. Such
1651 1654 # acquisition would not cause a dead-lock as it would just fail.
1652 1655 if wait and (self.ui.configbool('devel', 'all-warnings')
1653 1656 or self.ui.configbool('devel', 'check-locks')):
1654 1657 if self._currentlock(self._lockref) is not None:
1655 1658 self.ui.develwarn('"wlock" acquired after "lock"')
1656 1659
1657 1660 def unlock():
1658 1661 if self.dirstate.pendingparentchange():
1659 1662 self.dirstate.invalidate()
1660 1663 else:
1661 1664 self.dirstate.write(None)
1662 1665
1663 1666 self._filecache['dirstate'].refresh()
1664 1667
1665 1668 l = self._lock(self.vfs, "wlock", wait, unlock,
1666 1669 self.invalidatedirstate, _('working directory of %s') %
1667 1670 self.origroot,
1668 1671 inheritchecker=self._wlockchecktransaction,
1669 1672 parentenvvar='HG_WLOCK_LOCKER')
1670 1673 self._wlockref = weakref.ref(l)
1671 1674 return l
1672 1675
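# Illustrative lock ordering (a sketch): acquire 'wlock' before 'lock',
# then open the transaction; all three behave as context managers here:
#
#     with repo.wlock():
#         with repo.lock():
#             with repo.transaction('my-operation'):
#                 ...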
1673 1676 def _currentlock(self, lockref):
1674 1677 """Returns the lock if it's held, or None if it's not."""
1675 1678 if lockref is None:
1676 1679 return None
1677 1680 l = lockref()
1678 1681 if l is None or not l.held:
1679 1682 return None
1680 1683 return l
1681 1684
1682 1685 def currentwlock(self):
1683 1686 """Returns the wlock if it's held, or None if it's not."""
1684 1687 return self._currentlock(self._wlockref)
1685 1688
1686 1689 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1687 1690 """
1688 1691 commit an individual file as part of a larger transaction
1689 1692 """
1690 1693
1691 1694 fname = fctx.path()
1692 1695 fparent1 = manifest1.get(fname, nullid)
1693 1696 fparent2 = manifest2.get(fname, nullid)
1694 1697 if isinstance(fctx, context.filectx):
1695 1698 node = fctx.filenode()
1696 1699 if node in [fparent1, fparent2]:
1697 1700 self.ui.debug('reusing %s filelog entry\n' % fname)
1698 1701 if manifest1.flags(fname) != fctx.flags():
1699 1702 changelist.append(fname)
1700 1703 return node
1701 1704
1702 1705 flog = self.file(fname)
1703 1706 meta = {}
1704 1707 copy = fctx.renamed()
1705 1708 if copy and copy[0] != fname:
1706 1709 # Mark the new revision of this file as a copy of another
1707 1710 # file. This copy data will effectively act as a parent
1708 1711 # of this new revision. If this is a merge, the first
1709 1712 # parent will be the nullid (meaning "look up the copy data")
1710 1713 # and the second one will be the other parent. For example:
1711 1714 #
1712 1715 # 0 --- 1 --- 3 rev1 changes file foo
1713 1716 # \ / rev2 renames foo to bar and changes it
1714 1717 # \- 2 -/ rev3 should have bar with all changes and
1715 1718 # should record that bar descends from
1716 1719 # bar in rev2 and foo in rev1
1717 1720 #
1718 1721 # this allows this merge to succeed:
1719 1722 #
1720 1723 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1721 1724 # \ / merging rev3 and rev4 should use bar@rev2
1722 1725 # \- 2 --- 4 as the merge base
1723 1726 #
1724 1727
1725 1728 cfname = copy[0]
1726 1729 crev = manifest1.get(cfname)
1727 1730 newfparent = fparent2
1728 1731
1729 1732 if manifest2: # branch merge
1730 1733 if fparent2 == nullid or crev is None: # copied on remote side
1731 1734 if cfname in manifest2:
1732 1735 crev = manifest2[cfname]
1733 1736 newfparent = fparent1
1734 1737
1735 1738 # Here, we used to search backwards through history to try to find
1736 1739 # where the file copy came from if the source of a copy was not in
1737 1740 # the parent directory. However, this doesn't actually make sense to
1738 1741 # do (what does a copy from something not in your working copy even
1739 1742 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1740 1743 # the user that copy information was dropped, so if they didn't
1741 1744 # expect this outcome it can be fixed, but this is the correct
1742 1745 # behavior in this circumstance.
1743 1746
1744 1747 if crev:
1745 1748 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1746 1749 meta["copy"] = cfname
1747 1750 meta["copyrev"] = hex(crev)
1748 1751 fparent1, fparent2 = nullid, newfparent
1749 1752 else:
1750 1753 self.ui.warn(_("warning: can't find ancestor for '%s' "
1751 1754 "copied from '%s'!\n") % (fname, cfname))
1752 1755
1753 1756 elif fparent1 == nullid:
1754 1757 fparent1, fparent2 = fparent2, nullid
1755 1758 elif fparent2 != nullid:
1756 1759 # is one parent an ancestor of the other?
1757 1760 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1758 1761 if fparent1 in fparentancestors:
1759 1762 fparent1, fparent2 = fparent2, nullid
1760 1763 elif fparent2 in fparentancestors:
1761 1764 fparent2 = nullid
1762 1765
1763 1766 # is the file changed?
1764 1767 text = fctx.data()
1765 1768 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1766 1769 changelist.append(fname)
1767 1770 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1768 1771 # are just the flags changed during merge?
1769 1772 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1770 1773 changelist.append(fname)
1771 1774
1772 1775 return fparent1
1773 1776
1774 1777 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1775 1778 """check for commit arguments that aren't committable"""
1776 1779 if match.isexact() or match.prefix():
1777 1780 matched = set(status.modified + status.added + status.removed)
1778 1781
1779 1782 for f in match.files():
1780 1783 f = self.dirstate.normalize(f)
1781 1784 if f == '.' or f in matched or f in wctx.substate:
1782 1785 continue
1783 1786 if f in status.deleted:
1784 1787 fail(f, _('file not found!'))
1785 1788 if f in vdirs: # visited directory
1786 1789 d = f + '/'
1787 1790 for mf in matched:
1788 1791 if mf.startswith(d):
1789 1792 break
1790 1793 else:
1791 1794 fail(f, _("no match under directory!"))
1792 1795 elif f not in self.dirstate:
1793 1796 fail(f, _("file not tracked!"))
1794 1797
1795 1798 @unfilteredmethod
1796 1799 def commit(self, text="", user=None, date=None, match=None, force=False,
1797 1800 editor=False, extra=None):
1798 1801 """Add a new revision to current repository.
1799 1802
1800 1803 Revision information is gathered from the working directory,
1801 1804 match can be used to filter the committed files. If editor is
1802 1805 supplied, it is called to get a commit message.
1803 1806 """
1804 1807 if extra is None:
1805 1808 extra = {}
1806 1809
1807 1810 def fail(f, msg):
1808 1811 raise error.Abort('%s: %s' % (f, msg))
1809 1812
1810 1813 if not match:
1811 1814 match = matchmod.always(self.root, '')
1812 1815
1813 1816 if not force:
1814 1817 vdirs = []
1815 1818 match.explicitdir = vdirs.append
1816 1819 match.bad = fail
1817 1820
1818 1821 wlock = lock = tr = None
1819 1822 try:
1820 1823 wlock = self.wlock()
1821 1824 lock = self.lock() # for recent changelog (see issue4368)
1822 1825
1823 1826 wctx = self[None]
1824 1827 merge = len(wctx.parents()) > 1
1825 1828
1826 1829 if not force and merge and not match.always():
1827 1830 raise error.Abort(_('cannot partially commit a merge '
1828 1831 '(do not specify files or patterns)'))
1829 1832
1830 1833 status = self.status(match=match, clean=force)
1831 1834 if force:
1832 1835 status.modified.extend(status.clean) # mq may commit clean files
1833 1836
1834 1837 # check subrepos
1835 1838 subs, commitsubs, newstate = subrepoutil.precommit(
1836 1839 self.ui, wctx, status, match, force=force)
1837 1840
1838 1841 # make sure all explicit patterns are matched
1839 1842 if not force:
1840 1843 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1841 1844
1842 1845 cctx = context.workingcommitctx(self, status,
1843 1846 text, user, date, extra)
1844 1847
1845 1848 # internal config: ui.allowemptycommit
1846 1849 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1847 1850 or extra.get('close') or merge or cctx.files()
1848 1851 or self.ui.configbool('ui', 'allowemptycommit'))
1849 1852 if not allowemptycommit:
1850 1853 return None
1851 1854
1852 1855 if merge and cctx.deleted():
1853 1856 raise error.Abort(_("cannot commit merge with missing files"))
1854 1857
1855 1858 ms = mergemod.mergestate.read(self)
1856 1859 mergeutil.checkunresolved(ms)
1857 1860
1858 1861 if editor:
1859 1862 cctx._text = editor(self, cctx, subs)
1860 1863 edited = (text != cctx._text)
1861 1864
1862 1865 # Save commit message in case this transaction gets rolled back
1863 1866 # (e.g. by a pretxncommit hook). Leave the content alone on
1864 1867 # the assumption that the user will use the same editor again.
1865 1868 msgfn = self.savecommitmessage(cctx._text)
1866 1869
1867 1870 # commit subs and write new state
1868 1871 if subs:
1869 1872 for s in sorted(commitsubs):
1870 1873 sub = wctx.sub(s)
1871 1874 self.ui.status(_('committing subrepository %s\n') %
1872 1875 subrepoutil.subrelpath(sub))
1873 1876 sr = sub.commit(cctx._text, user, date)
1874 1877 newstate[s] = (newstate[s][0], sr)
1875 1878 subrepoutil.writestate(self, newstate)
1876 1879
1877 1880 p1, p2 = self.dirstate.parents()
1878 1881 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1879 1882 try:
1880 1883 self.hook("precommit", throw=True, parent1=hookp1,
1881 1884 parent2=hookp2)
1882 1885 tr = self.transaction('commit')
1883 1886 ret = self.commitctx(cctx, True)
1884 1887 except: # re-raises
1885 1888 if edited:
1886 1889 self.ui.write(
1887 1890 _('note: commit message saved in %s\n') % msgfn)
1888 1891 raise
1889 1892 # update bookmarks, dirstate and mergestate
1890 1893 bookmarks.update(self, [p1, p2], ret)
1891 1894 cctx.markcommitted(ret)
1892 1895 ms.reset()
1893 1896 tr.close()
1894 1897
1895 1898 finally:
1896 1899 lockmod.release(tr, lock, wlock)
1897 1900
1898 1901 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1899 1902 # hack for commands that use a temporary commit (eg: histedit)
1900 1903 # the temporary commit may have been stripped before the hook runs
1901 1904 if self.changelog.hasnode(ret):
1902 1905 self.hook("commit", node=node, parent1=parent1,
1903 1906 parent2=parent2)
1904 1907 self._afterlock(commithook)
1905 1908 return ret
1906 1909
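As an aside, a minimal sketch of driving this API from a script; the repository path, pattern, and message are made up for illustration and are not part of this change:

    from mercurial import hg, match as matchmod, ui as uimod

    u = uimod.ui.load()
    repo = hg.repository(u, '.')
    # commit only files under src/, analogous to `hg commit src`
    m = matchmod.match(repo.root, repo.getcwd(), ['path:src'])
    node = repo.commit(text='update src', match=m)  # None if nothing to commit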
1907 1910 @unfilteredmethod
1908 1911 def commitctx(self, ctx, error=False):
1909 1912 """Add a new revision to current repository.
1910 1913 Revision information is passed via the context argument.
1911 1914 """
1912 1915
1913 1916 tr = None
1914 1917 p1, p2 = ctx.p1(), ctx.p2()
1915 1918 user = ctx.user()
1916 1919
1917 1920 lock = self.lock()
1918 1921 try:
1919 1922 tr = self.transaction("commit")
1920 1923 trp = weakref.proxy(tr)
1921 1924
1922 1925 if ctx.manifestnode():
1923 1926 # reuse an existing manifest revision
1924 1927 mn = ctx.manifestnode()
1925 1928 files = ctx.files()
1926 1929 elif ctx.files():
1927 1930 m1ctx = p1.manifestctx()
1928 1931 m2ctx = p2.manifestctx()
1929 1932 mctx = m1ctx.copy()
1930 1933
1931 1934 m = mctx.read()
1932 1935 m1 = m1ctx.read()
1933 1936 m2 = m2ctx.read()
1934 1937
1935 1938 # check in files
1936 1939 added = []
1937 1940 changed = []
1938 1941 removed = list(ctx.removed())
1939 1942 linkrev = len(self)
1940 1943 self.ui.note(_("committing files:\n"))
1941 1944 for f in sorted(ctx.modified() + ctx.added()):
1942 1945 self.ui.note(f + "\n")
1943 1946 try:
1944 1947 fctx = ctx[f]
1945 1948 if fctx is None:
1946 1949 removed.append(f)
1947 1950 else:
1948 1951 added.append(f)
1949 1952 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1950 1953 trp, changed)
1951 1954 m.setflag(f, fctx.flags())
1952 1955 except OSError as inst:
1953 1956 self.ui.warn(_("trouble committing %s!\n") % f)
1954 1957 raise
1955 1958 except IOError as inst:
1956 1959 errcode = getattr(inst, 'errno', errno.ENOENT)
1957 1960 if error or errcode and errcode != errno.ENOENT:
1958 1961 self.ui.warn(_("trouble committing %s!\n") % f)
1959 1962 raise
1960 1963
1961 1964 # update manifest
1962 1965 self.ui.note(_("committing manifest\n"))
1963 1966 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1964 1967 drop = [f for f in removed if f in m]
1965 1968 for f in drop:
1966 1969 del m[f]
1967 1970 mn = mctx.write(trp, linkrev,
1968 1971 p1.manifestnode(), p2.manifestnode(),
1969 1972 added, drop)
1970 1973 files = changed + removed
1971 1974 else:
1972 1975 mn = p1.manifestnode()
1973 1976 files = []
1974 1977
1975 1978 # update changelog
1976 1979 self.ui.note(_("committing changelog\n"))
1977 1980 self.changelog.delayupdate(tr)
1978 1981 n = self.changelog.add(mn, files, ctx.description(),
1979 1982 trp, p1.node(), p2.node(),
1980 1983 user, ctx.date(), ctx.extra().copy())
1981 1984 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1982 1985 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1983 1986 parent2=xp2)
1984 1987 # set the new commit to its proper phase
1985 1988 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
1986 1989 if targetphase:
1987 1990 # retracting the boundary does not alter parent changesets.
1988 1991 # if a parent has a higher phase, the resulting phase will
1989 1992 # be compliant anyway
1990 1993 #
1991 1994 # if minimal phase was 0 we don't need to retract anything
1992 1995 phases.registernew(self, tr, targetphase, [n])
1993 1996 tr.close()
1994 1997 return n
1995 1998 finally:
1996 1999 if tr:
1997 2000 tr.release()
1998 2001 lock.release()
1999 2002
2000 2003 @unfilteredmethod
2001 2004 def destroying(self):
2002 2005 '''Inform the repository that nodes are about to be destroyed.
2003 2006 Intended for use by strip and rollback, so there's a common
2004 2007 place for anything that has to be done before destroying history.
2005 2008
2006 2009 This is mostly useful for saving state that is in memory and waiting
2007 2010 to be flushed when the current lock is released. Because a call to
2008 2011 destroyed is imminent, the repo will be invalidated causing those
2009 2012 changes to stay in memory (waiting for the next unlock), or vanish
2010 2013 completely.
2011 2014 '''
2012 2015 # When using the same lock to commit and strip, the phasecache is left
2013 2016 # dirty after committing. Then when we strip, the repo is invalidated,
2014 2017 # causing those changes to disappear.
2015 2018 if '_phasecache' in vars(self):
2016 2019 self._phasecache.write()
2017 2020
2018 2021 @unfilteredmethod
2019 2022 def destroyed(self):
2020 2023 '''Inform the repository that nodes have been destroyed.
2021 2024 Intended for use by strip and rollback, so there's a common
2022 2025 place for anything that has to be done after destroying history.
2023 2026 '''
2024 2027 # When one tries to:
2025 2028 # 1) destroy nodes thus calling this method (e.g. strip)
2026 2029 # 2) use phasecache somewhere (e.g. commit)
2027 2030 #
2028 2031 # then 2) will fail because the phasecache contains nodes that were
2029 2032 # removed. We can either remove phasecache from the filecache,
2030 2033 # causing it to reload next time it is accessed, or simply filter
2031 2034 # the removed nodes now and write the updated cache.
2032 2035 self._phasecache.filterunknown(self)
2033 2036 self._phasecache.write()
2034 2037
2035 2038 # refresh all repository caches
2036 2039 self.updatecaches()
2037 2040
2038 2041 # Ensure the persistent tag cache is updated. Doing it now
2039 2042 # means that the tag cache only has to worry about destroyed
2040 2043 # heads immediately after a strip/rollback. That in turn
2041 2044 # guarantees that "cachetip == currenttip" (comparing both rev
2042 2045 # and node) always means no nodes have been added or destroyed.
2043 2046
2044 2047 # XXX this is suboptimal when qrefresh'ing: we strip the current
2045 2048 # head, refresh the tag cache, then immediately add a new head.
2046 2049 # But I think doing it this way is necessary for the "instant
2047 2050 # tag cache retrieval" case to work.
2048 2051 self.invalidate()
2049 2052
2050 2053 def status(self, node1='.', node2=None, match=None,
2051 2054 ignored=False, clean=False, unknown=False,
2052 2055 listsubrepos=False):
2053 2056 '''a convenience method that calls node1.status(node2)'''
2054 2057 return self[node1].status(node2, match, ignored, clean, unknown,
2055 2058 listsubrepos)
2056 2059
2057 2060 def addpostdsstatus(self, ps):
2058 2061 """Add a callback to run within the wlock, at the point at which status
2059 2062 fixups happen.
2060 2063
2061 2064 On status completion, callback(wctx, status) will be called with the
2062 2065 wlock held, unless the dirstate has changed from underneath or the wlock
2063 2066 couldn't be grabbed.
2064 2067
2065 2068 Callbacks should not capture and use a cached copy of the dirstate --
2066 2069 it might change in the meanwhile. Instead, they should access the
2067 2070 dirstate via wctx.repo().dirstate.
2068 2071
2069 2072 This list is emptied out after each status run -- extensions should
2070 2073 make sure they add to this list each time dirstate.status is called.
2071 2074 Extensions should also make sure they don't call this for statuses
2072 2075 that don't involve the dirstate.
2073 2076 """
2074 2077
2075 2078 # The list is located here for uniqueness reasons -- it is actually
2076 2079 # managed by the workingctx, but that isn't unique per-repo.
2077 2080 self._postdsstatus.append(ps)
2078 2081
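A minimal sketch of a callback honoring the contract above; the debug message is purely illustrative:

    def _poststatus(wctx, status):
        # runs with the wlock held; read the dirstate via
        # wctx.repo().dirstate, never from a cached copy
        wctx.repo().ui.debug('fixups: %d modified\n' % len(status.modified))

    repo.addpostdsstatus(_poststatus)  # one-shot: re-add before each status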
2079 2082 def postdsstatus(self):
2080 2083 """Used by workingctx to get the list of post-dirstate-status hooks."""
2081 2084 return self._postdsstatus
2082 2085
2083 2086 def clearpostdsstatus(self):
2084 2087 """Used by workingctx to clear post-dirstate-status hooks."""
2085 2088 del self._postdsstatus[:]
2086 2089
2087 2090 def heads(self, start=None):
2088 2091 if start is None:
2089 2092 cl = self.changelog
2090 2093 headrevs = reversed(cl.headrevs())
2091 2094 return [cl.node(rev) for rev in headrevs]
2092 2095
2093 2096 heads = self.changelog.heads(start)
2094 2097 # sort the output in rev descending order
2095 2098 return sorted(heads, key=self.changelog.rev, reverse=True)
2096 2099
2097 2100 def branchheads(self, branch=None, start=None, closed=False):
2098 2101 '''return a (possibly filtered) list of heads for the given branch
2099 2102
2100 2103 Heads are returned in topological order, from newest to oldest.
2101 2104 If branch is None, use the dirstate branch.
2102 2105 If start is not None, return only heads reachable from start.
2103 2106 If closed is True, return heads that are marked as closed as well.
2104 2107 '''
2105 2108 if branch is None:
2106 2109 branch = self[None].branch()
2107 2110 branches = self.branchmap()
2108 2111 if branch not in branches:
2109 2112 return []
2110 2113 # the cache returns heads ordered lowest to highest
2111 2114 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2112 2115 if start is not None:
2113 2116 # filter out the heads that cannot be reached from startrev
2114 2117 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2115 2118 bheads = [h for h in bheads if h in fbheads]
2116 2119 return bheads
2117 2120
2118 2121 def branches(self, nodes):
2119 2122 if not nodes:
2120 2123 nodes = [self.changelog.tip()]
2121 2124 b = []
2122 2125 for n in nodes:
2123 2126 t = n
2124 2127 while True:
2125 2128 p = self.changelog.parents(n)
2126 2129 if p[1] != nullid or p[0] == nullid:
2127 2130 b.append((t, n, p[0], p[1]))
2128 2131 break
2129 2132 n = p[0]
2130 2133 return b
2131 2134
2132 2135 def between(self, pairs):
2133 2136 r = []
2134 2137
2135 2138 for top, bottom in pairs:
2136 2139 n, l, i = top, [], 0
2137 2140 f = 1
2138 2141
2139 2142 while n != bottom and n != nullid:
2140 2143 p = self.changelog.parents(n)[0]
2141 2144 if i == f:
2142 2145 l.append(n)
2143 2146 f = f * 2
2144 2147 n = p
2145 2148 i += 1
2146 2149
2147 2150 r.append(l)
2148 2151
2149 2152 return r
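In effect, for each (top, bottom) pair the loop records the nodes at power-of-two distances 1, 2, 4, 8, ... from top along the first-parent chain, which is the sampling the legacy discovery protocol expects. A standalone sketch of that pattern (the helper name is made up; the bottom/nullid stop conditions are omitted):

    def _sample(chain):  # chain[0] is 'top'
        l, f = [], 1
        for i, n in enumerate(chain):
            if i == f:
                l.append(n)
                f *= 2
        return l

    _sample('abcdefghij')  # -> ['b', 'c', 'e', 'i'], distances 1, 2, 4, 8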
2150 2153
2151 2154 def checkpush(self, pushop):
2152 2155 """Extensions can override this function if additional checks have
2153 2156 to be performed before pushing, or call it if they override push
2154 2157 command.
2155 2158 """
2156 2159
2157 2160 @unfilteredpropertycache
2158 2161 def prepushoutgoinghooks(self):
2159 2162 """Return a util.hooks instance whose hooks are called with a pushop
2160 2163 (carrying repo, remote and outgoing attributes) before pushing changesets.
2161 2164 """
2162 2165 return util.hooks()
2163 2166
2164 2167 def pushkey(self, namespace, key, old, new):
2165 2168 try:
2166 2169 tr = self.currenttransaction()
2167 2170 hookargs = {}
2168 2171 if tr is not None:
2169 2172 hookargs.update(tr.hookargs)
2170 2173 hookargs['namespace'] = namespace
2171 2174 hookargs['key'] = key
2172 2175 hookargs['old'] = old
2173 2176 hookargs['new'] = new
2174 2177 self.hook('prepushkey', throw=True, **hookargs)
2175 2178 except error.HookAbort as exc:
2176 2179 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2177 2180 if exc.hint:
2178 2181 self.ui.write_err(_("(%s)\n") % exc.hint)
2179 2182 return False
2180 2183 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2181 2184 ret = pushkey.push(self, namespace, key, old, new)
2182 2185 def runhook():
2183 2186 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2184 2187 ret=ret)
2185 2188 self._afterlock(runhook)
2186 2189 return ret
2187 2190
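For context, a sketch of an in-process prepushkey hook that would receive the hookargs assembled above; the rejection policy is purely illustrative:

    def rejectbookmarks(ui, repo, namespace=None, key=None, old=None,
                        new=None, **kwargs):
        if namespace == 'bookmarks':
            ui.warn('bookmark pushes are disabled here\n')
            return True  # a truthy return fails the hook and aborts the push
        return False

    # e.g. wired up from an extension:
    # ui.setconfig('hooks', 'prepushkey.nobookmarks', rejectbookmarks)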
2188 2191 def listkeys(self, namespace):
2189 2192 self.hook('prelistkeys', throw=True, namespace=namespace)
2190 2193 self.ui.debug('listing keys for "%s"\n' % namespace)
2191 2194 values = pushkey.list(self, namespace)
2192 2195 self.hook('listkeys', namespace=namespace, values=values)
2193 2196 return values
2194 2197
2195 2198 def debugwireargs(self, one, two, three=None, four=None, five=None):
2196 2199 '''used to test argument passing over the wire'''
2197 2200 return "%s %s %s %s %s" % (one, two, three, four, five)
2198 2201
2199 2202 def savecommitmessage(self, text):
2200 2203 fp = self.vfs('last-message.txt', 'wb')
2201 2204 try:
2202 2205 fp.write(text)
2203 2206 finally:
2204 2207 fp.close()
2205 2208 return self.pathto(fp.name[len(self.root) + 1:])
2206 2209
2207 2210 # used to avoid circular references so destructors work
2208 2211 def aftertrans(files):
2209 2212 renamefiles = [tuple(t) for t in files]
2210 2213 def a():
2211 2214 for vfs, src, dest in renamefiles:
2212 2215 # if src and dest refer to a same file, vfs.rename is a no-op,
2213 2216 # leaving both src and dest on disk. delete dest to make sure
2214 2217 # the rename couldn't be such a no-op.
2215 2218 vfs.tryunlink(dest)
2216 2219 try:
2217 2220 vfs.rename(src, dest)
2218 2221 except OSError: # journal file does not yet exist
2219 2222 pass
2220 2223 return a
2221 2224
2222 2225 def undoname(fn):
2223 2226 base, name = os.path.split(fn)
2224 2227 assert name.startswith('journal')
2225 2228 return os.path.join(base, name.replace('journal', 'undo', 1))
2226 2229
2227 2230 def instance(ui, path, create):
2228 2231 return localrepository(ui, util.urllocalpath(path), create)
2229 2232
2230 2233 def islocal(path):
2231 2234 return True
2232 2235
2233 2236 def newreporequirements(repo):
2234 2237 """Determine the set of requirements for a new local repository.
2235 2238
2236 2239 Extensions can wrap this function to specify custom requirements for
2237 2240 new repositories.
2238 2241 """
2239 2242 ui = repo.ui
2240 2243 requirements = {'revlogv1'}
2241 2244 if ui.configbool('format', 'usestore'):
2242 2245 requirements.add('store')
2243 2246 if ui.configbool('format', 'usefncache'):
2244 2247 requirements.add('fncache')
2245 2248 if ui.configbool('format', 'dotencode'):
2246 2249 requirements.add('dotencode')
2247 2250
2248 2251 compengine = ui.config('experimental', 'format.compression')
2249 2252 if compengine not in util.compengines:
2250 2253 raise error.Abort(_('compression engine %s defined by '
2251 2254 'experimental.format.compression not available') %
2252 2255 compengine,
2253 2256 hint=_('run "hg debuginstall" to list available '
2254 2257 'compression engines'))
2255 2258
2256 2259 # zlib is the historical default and doesn't need an explicit requirement.
2257 2260 if compengine != 'zlib':
2258 2261 requirements.add('exp-compression-%s' % compengine)
2259 2262
2260 2263 if scmutil.gdinitconfig(ui):
2261 2264 requirements.add('generaldelta')
2262 2265 if ui.configbool('experimental', 'treemanifest'):
2263 2266 requirements.add('treemanifest')
2264 if ui.configbool('experimental', 'manifestv2'):
2265 requirements.add('manifestv2')
2266 2267
2267 2268 revlogv2 = ui.config('experimental', 'revlogv2')
2268 2269 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2269 2270 requirements.remove('revlogv1')
2270 2271 # generaldelta is implied by revlogv2.
2271 2272 requirements.discard('generaldelta')
2272 2273 requirements.add(REVLOGV2_REQUIREMENT)
2273 2274
2274 2275 return requirements
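A sketch of the wrapping pattern the docstring refers to; the extension, config knob, and requirement name are hypothetical:

    from mercurial import extensions, localrepo

    def _newreporequirements(orig, repo):
        reqs = orig(repo)
        if repo.ui.configbool('myext', 'use-fancy-format'):  # hypothetical
            reqs.add('exp-myext-fancy-format')               # hypothetical
        return reqs

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _newreporequirements)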
@@ -1,1649 +1,1573 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import itertools
12 import os
13 12 import struct
14 13
15 14 from .i18n import _
16 15 from .node import (
17 16 bin,
18 17 hex,
19 18 )
20 19 from . import (
21 20 error,
22 21 mdiff,
23 22 policy,
24 23 revlog,
25 24 util,
26 25 )
27 26
28 27 parsers = policy.importmod(r'parsers')
29 28 propertycache = util.propertycache
30 29
31 def _parsev1(data):
30 def _parse(data):
32 31 # This function does a little bit of excessive-looking
33 32 # precondition checking. This is so that its behavior
34 33 # exactly matches the C counterpart, to try and help
35 34 # prevent surprise breakage for anyone who develops against
36 35 # the pure version.
37 36 if data and data[-1:] != '\n':
38 37 raise ValueError('Manifest did not end in a newline.')
39 38 prev = None
40 39 for l in data.splitlines():
41 40 if prev is not None and prev > l:
42 41 raise ValueError('Manifest lines not in sorted order.')
43 42 prev = l
44 43 f, n = l.split('\0')
45 44 if len(n) > 40:
46 45 yield f, bin(n[:40]), n[40:]
47 46 else:
48 47 yield f, bin(n), ''
49 48
50 def _parsev2(data):
51 metadataend = data.find('\n')
52 # Just ignore metadata for now
53 pos = metadataend + 1
54 prevf = ''
55 while pos < len(data):
56 end = data.find('\n', pos + 1) # +1 to skip stem length byte
57 if end == -1:
58 raise ValueError('Manifest ended with incomplete file entry.')
59 stemlen = ord(data[pos:pos + 1])
60 items = data[pos + 1:end].split('\0')
61 f = prevf[:stemlen] + items[0]
62 if prevf > f:
63 raise ValueError('Manifest entries not in sorted order.')
64 fl = items[1]
65 # Just ignore metadata (items[2:] for now)
66 n = data[end + 1:end + 21]
67 yield f, n, fl
68 pos = end + 22
69 prevf = f
70
71 def _parse(data):
72 """Generates (path, node, flags) tuples from a manifest text"""
73 if data.startswith('\0'):
74 return iter(_parsev2(data))
75 else:
76 return iter(_parsev1(data))
77
78 def _text(it, usemanifestv2):
79 """Given an iterator over (path, node, flags) tuples, returns a manifest
80 text"""
81 if usemanifestv2:
82 return _textv2(it)
83 else:
84 return _textv1(it)
85
86 def _textv1(it):
49 def _text(it):
87 50 files = []
88 51 lines = []
89 52 _hex = revlog.hex
90 53 for f, n, fl in it:
91 54 files.append(f)
92 55 # if this is changed to support newlines in filenames,
93 56 # be sure to check the templates/ dir again (especially *-raw.tmpl)
94 57 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
95 58
96 59 _checkforbidden(files)
97 60 return ''.join(lines)
98 61
99 def _textv2(it):
100 files = []
101 lines = ['\0\n']
102 prevf = ''
103 for f, n, fl in it:
104 files.append(f)
105 stem = os.path.commonprefix([prevf, f])
106 stemlen = min(len(stem), 255)
107 lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
108 prevf = f
109 _checkforbidden(files)
110 return ''.join(lines)
111
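With the v2 reader and writer gone, the only manifest text format left is v1: one "<path>\0<40 hex chars><flags>\n" line per file, sorted by path. A round-trip sketch using the helpers above (the node values are made up):

    data = ('bar/baz.py\0' + '1' * 40 + '\n'
            'foo.py\0' + '2' * 40 + 'x\n')  # 'x' marks an executable
    entries = list(_parse(data))            # (path, 20-byte node, flags)
    assert _text(iter(entries)) == data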
112 62 class lazymanifestiter(object):
113 63 def __init__(self, lm):
114 64 self.pos = 0
115 65 self.lm = lm
116 66
117 67 def __iter__(self):
118 68 return self
119 69
120 70 def next(self):
121 71 try:
122 72 data, pos = self.lm._get(self.pos)
123 73 except IndexError:
124 74 raise StopIteration
125 75 if pos == -1:
126 76 self.pos += 1
127 77 return data[0]
128 78 self.pos += 1
129 79 zeropos = data.find('\x00', pos)
130 80 return data[pos:zeropos]
131 81
132 82 __next__ = next
133 83
134 84 class lazymanifestiterentries(object):
135 85 def __init__(self, lm):
136 86 self.lm = lm
137 87 self.pos = 0
138 88
139 89 def __iter__(self):
140 90 return self
141 91
142 92 def next(self):
143 93 try:
144 94 data, pos = self.lm._get(self.pos)
145 95 except IndexError:
146 96 raise StopIteration
147 97 if pos == -1:
148 98 self.pos += 1
149 99 return data
150 100 zeropos = data.find('\x00', pos)
151 101 hashval = unhexlify(data, self.lm.extrainfo[self.pos],
152 102 zeropos + 1, 40)
153 103 flags = self.lm._getflags(data, self.pos, zeropos)
154 104 self.pos += 1
155 105 return (data[pos:zeropos], hashval, flags)
156 106
157 107 __next__ = next
158 108
159 109 def unhexlify(data, extra, pos, length):
160 110 s = bin(data[pos:pos + length])
161 111 if extra:
162 112 s += chr(extra & 0xff)
163 113 return s
164 114
165 115 def _cmp(a, b):
166 116 return (a > b) - (a < b)
167 117
168 118 class _lazymanifest(object):
169 119 def __init__(self, data, positions=None, extrainfo=None, extradata=None):
170 120 if positions is None:
171 121 self.positions = self.findlines(data)
172 122 self.extrainfo = [0] * len(self.positions)
173 123 self.data = data
174 124 self.extradata = []
175 125 else:
176 126 self.positions = positions[:]
177 127 self.extrainfo = extrainfo[:]
178 128 self.extradata = extradata[:]
179 129 self.data = data
180 130
181 131 def findlines(self, data):
182 132 if not data:
183 133 return []
184 134 pos = data.find("\n")
185 135 if pos == -1 or data[-1:] != '\n':
186 136 raise ValueError("Manifest did not end in a newline.")
187 137 positions = [0]
188 138 prev = data[:data.find('\x00')]
189 139 while pos < len(data) - 1 and pos != -1:
190 140 positions.append(pos + 1)
191 141 nexts = data[pos + 1:data.find('\x00', pos + 1)]
192 142 if nexts < prev:
193 143 raise ValueError("Manifest lines not in sorted order.")
194 144 prev = nexts
195 145 pos = data.find("\n", pos + 1)
196 146 return positions
197 147
198 148 def _get(self, index):
199 149 # get the position encoded in pos:
200 150 # positive number is an index in 'data'
201 151 # negative number encodes an index in 'extradata' (-pos - 1)
202 152 pos = self.positions[index]
203 153 if pos >= 0:
204 154 return self.data, pos
205 155 return self.extradata[-pos - 1], -1
206 156
207 157 def _getkey(self, pos):
208 158 if pos >= 0:
209 159 return self.data[pos:self.data.find('\x00', pos + 1)]
210 160 return self.extradata[-pos - 1][0]
211 161
212 162 def bsearch(self, key):
213 163 first = 0
214 164 last = len(self.positions) - 1
215 165
216 166 while first <= last:
217 167 midpoint = (first + last)//2
218 168 nextpos = self.positions[midpoint]
219 169 candidate = self._getkey(nextpos)
220 170 r = _cmp(key, candidate)
221 171 if r == 0:
222 172 return midpoint
223 173 else:
224 174 if r < 0:
225 175 last = midpoint - 1
226 176 else:
227 177 first = midpoint + 1
228 178 return -1
229 179
230 180 def bsearch2(self, key):
231 181 # same as the above, but will always return the position
232 182 # done for performance reasons
233 183 first = 0
234 184 last = len(self.positions) - 1
235 185
236 186 while first <= last:
237 187 midpoint = (first + last)//2
238 188 nextpos = self.positions[midpoint]
239 189 candidate = self._getkey(nextpos)
240 190 r = _cmp(key, candidate)
241 191 if r == 0:
242 192 return (midpoint, True)
243 193 else:
244 194 if r < 0:
245 195 last = midpoint - 1
246 196 else:
247 197 first = midpoint + 1
248 198 return (first, False)
249 199
250 200 def __contains__(self, key):
251 201 return self.bsearch(key) != -1
252 202
253 203 def _getflags(self, data, needle, pos):
254 204 start = pos + 41
255 205 end = data.find("\n", start)
256 206 if end == -1:
257 207 end = len(data) - 1
258 208 if start == end:
259 209 return ''
260 210 return self.data[start:end]
261 211
262 212 def __getitem__(self, key):
263 213 if not isinstance(key, bytes):
264 214 raise TypeError("getitem: manifest keys must be bytes.")
265 215 needle = self.bsearch(key)
266 216 if needle == -1:
267 217 raise KeyError
268 218 data, pos = self._get(needle)
269 219 if pos == -1:
270 220 return (data[1], data[2])
271 221 zeropos = data.find('\x00', pos)
272 222 assert 0 <= needle <= len(self.positions)
273 223 assert len(self.extrainfo) == len(self.positions)
274 224 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
275 225 flags = self._getflags(data, needle, zeropos)
276 226 return (hashval, flags)
277 227
278 228 def __delitem__(self, key):
279 229 needle, found = self.bsearch2(key)
280 230 if not found:
281 231 raise KeyError
282 232 cur = self.positions[needle]
283 233 self.positions = self.positions[:needle] + self.positions[needle + 1:]
284 234 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
285 235 if cur >= 0:
286 236 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
287 237
288 238 def __setitem__(self, key, value):
289 239 if not isinstance(key, bytes):
290 240 raise TypeError("setitem: manifest keys must be a byte string.")
291 241 if not isinstance(value, tuple) or len(value) != 2:
292 242 raise TypeError("Manifest values must be a tuple of (node, flags).")
293 243 hashval = value[0]
294 244 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
295 245 raise TypeError("node must be a 20-byte byte string")
296 246 flags = value[1]
297 247 if len(hashval) == 22:
298 248 hashval = hashval[:-1]
299 249 if not isinstance(flags, bytes) or len(flags) > 1:
300 250 raise TypeError("flags must be a 0 or 1 byte string, got %r" % flags)
301 251 needle, found = self.bsearch2(key)
302 252 if found:
303 253 # put the item
304 254 pos = self.positions[needle]
305 255 if pos < 0:
306 256 self.extradata[-pos - 1] = (key, hashval, value[1])
307 257 else:
308 258 # just don't bother
309 259 self.extradata.append((key, hashval, value[1]))
310 260 self.positions[needle] = -len(self.extradata)
311 261 else:
312 262 # not found, put it in with extra positions
313 263 self.extradata.append((key, hashval, value[1]))
314 264 self.positions = (self.positions[:needle] + [-len(self.extradata)]
315 265 + self.positions[needle:])
316 266 self.extrainfo = (self.extrainfo[:needle] + [0] +
317 267 self.extrainfo[needle:])
318 268
319 269 def copy(self):
320 270 # XXX call _compact like in C?
321 271 return _lazymanifest(self.data, self.positions, self.extrainfo,
322 272 self.extradata)
323 273
324 274 def _compact(self):
325 275 # hopefully not called TOO often
326 276 if len(self.extradata) == 0:
327 277 return
328 278 l = []
329 279 last_cut = 0
330 280 i = 0
331 281 offset = 0
332 282 self.extrainfo = [0] * len(self.positions)
333 283 while i < len(self.positions):
334 284 if self.positions[i] >= 0:
335 285 cur = self.positions[i]
336 286 last_cut = cur
337 287 while True:
338 288 self.positions[i] = offset
339 289 i += 1
340 290 if i == len(self.positions) or self.positions[i] < 0:
341 291 break
342 292 offset += self.positions[i] - cur
343 293 cur = self.positions[i]
344 294 end_cut = self.data.find('\n', cur)
345 295 if end_cut != -1:
346 296 end_cut += 1
347 297 offset += end_cut - cur
348 298 l.append(self.data[last_cut:end_cut])
349 299 else:
350 300 while i < len(self.positions) and self.positions[i] < 0:
351 301 cur = self.positions[i]
352 302 t = self.extradata[-cur - 1]
353 303 l.append(self._pack(t))
354 304 self.positions[i] = offset
355 305 if len(t[1]) > 20:
356 306 self.extrainfo[i] = ord(t[1][21])
357 307 offset += len(l[-1])
358 308 i += 1
359 309 self.data = ''.join(l)
360 310 self.extradata = []
361 311
362 312 def _pack(self, d):
363 313 return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
364 314
365 315 def text(self):
366 316 self._compact()
367 317 return self.data
368 318
369 319 def diff(self, m2, clean=False):
370 320 '''Finds changes between the current manifest and m2.'''
371 321 # XXX think whether efficiency matters here
372 322 diff = {}
373 323
374 324 for fn, e1, flags in self.iterentries():
375 325 if fn not in m2:
376 326 diff[fn] = (e1, flags), (None, '')
377 327 else:
378 328 e2 = m2[fn]
379 329 if (e1, flags) != e2:
380 330 diff[fn] = (e1, flags), e2
381 331 elif clean:
382 332 diff[fn] = None
383 333
384 334 for fn, e2, flags in m2.iterentries():
385 335 if fn not in self:
386 336 diff[fn] = (None, ''), (e2, flags)
387 337
388 338 return diff
389 339
390 340 def iterentries(self):
391 341 return lazymanifestiterentries(self)
392 342
393 343 def iterkeys(self):
394 344 return lazymanifestiter(self)
395 345
396 346 def __iter__(self):
397 347 return lazymanifestiter(self)
398 348
399 349 def __len__(self):
400 350 return len(self.positions)
401 351
402 352 def filtercopy(self, filterfn):
403 353 # XXX should be optimized
404 354 c = _lazymanifest('')
405 355 for f, n, fl in self.iterentries():
406 356 if filterfn(f):
407 357 c[f] = n, fl
408 358 return c
409 359
410 360 try:
411 361 _lazymanifest = parsers.lazymanifest
412 362 except AttributeError:
413 363 pass
414 364
415 365 class manifestdict(object):
416 366 def __init__(self, data=''):
417 if data.startswith('\0'):
418 #_lazymanifest can not parse v2
419 self._lm = _lazymanifest('')
420 for f, n, fl in _parsev2(data):
421 self._lm[f] = n, fl
422 else:
423 self._lm = _lazymanifest(data)
367 self._lm = _lazymanifest(data)
424 368
425 369 def __getitem__(self, key):
426 370 return self._lm[key][0]
427 371
428 372 def find(self, key):
429 373 return self._lm[key]
430 374
431 375 def __len__(self):
432 376 return len(self._lm)
433 377
434 378 def __nonzero__(self):
435 379 # nonzero is covered by the __len__ function, but implementing it here
436 380 # makes it easier for extensions to override.
437 381 return len(self._lm) != 0
438 382
439 383 __bool__ = __nonzero__
440 384
441 385 def __setitem__(self, key, node):
442 386 self._lm[key] = node, self.flags(key, '')
443 387
444 388 def __contains__(self, key):
445 389 if key is None:
446 390 return False
447 391 return key in self._lm
448 392
449 393 def __delitem__(self, key):
450 394 del self._lm[key]
451 395
452 396 def __iter__(self):
453 397 return self._lm.__iter__()
454 398
455 399 def iterkeys(self):
456 400 return self._lm.iterkeys()
457 401
458 402 def keys(self):
459 403 return list(self.iterkeys())
460 404
461 405 def filesnotin(self, m2, match=None):
462 406 '''Set of files in this manifest that are not in the other'''
463 407 if match:
464 408 m1 = self.matches(match)
465 409 m2 = m2.matches(match)
466 410 return m1.filesnotin(m2)
467 411 diff = self.diff(m2)
468 412 files = set(filepath
469 413 for filepath, hashflags in diff.iteritems()
470 414 if hashflags[1][0] is None)
471 415 return files
472 416
473 417 @propertycache
474 418 def _dirs(self):
475 419 return util.dirs(self)
476 420
477 421 def dirs(self):
478 422 return self._dirs
479 423
480 424 def hasdir(self, dir):
481 425 return dir in self._dirs
482 426
483 427 def _filesfastpath(self, match):
484 428 '''Checks whether we can correctly and quickly iterate over matcher
485 429 files instead of over manifest files.'''
486 430 files = match.files()
487 431 return (len(files) < 100 and (match.isexact() or
488 432 (match.prefix() and all(fn in self for fn in files))))
489 433
490 434 def walk(self, match):
491 435 '''Generates matching file names.
492 436
493 437 Equivalent to manifest.matches(match).iterkeys(), but without creating
494 438 an entirely new manifest.
495 439
496 440 It also reports nonexistent files by marking them bad with match.bad().
497 441 '''
498 442 if match.always():
499 443 for f in iter(self):
500 444 yield f
501 445 return
502 446
503 447 fset = set(match.files())
504 448
505 449 # avoid the entire walk if we're only looking for specific files
506 450 if self._filesfastpath(match):
507 451 for fn in sorted(fset):
508 452 yield fn
509 453 return
510 454
511 455 for fn in self:
512 456 if fn in fset:
513 457 # specified pattern is the exact name
514 458 fset.remove(fn)
515 459 if match(fn):
516 460 yield fn
517 461
518 462 # for dirstate.walk, files=['.'] means "walk the whole tree".
519 463 # follow that here, too
520 464 fset.discard('.')
521 465
522 466 for fn in sorted(fset):
523 467 if not self.hasdir(fn):
524 468 match.bad(fn, None)
525 469
526 470 def matches(self, match):
527 471 '''generate a new manifest filtered by the match argument'''
528 472 if match.always():
529 473 return self.copy()
530 474
531 475 if self._filesfastpath(match):
532 476 m = manifestdict()
533 477 lm = self._lm
534 478 for fn in match.files():
535 479 if fn in lm:
536 480 m._lm[fn] = lm[fn]
537 481 return m
538 482
539 483 m = manifestdict()
540 484 m._lm = self._lm.filtercopy(match)
541 485 return m
542 486
543 487 def diff(self, m2, match=None, clean=False):
544 488 '''Finds changes between the current manifest and m2.
545 489
546 490 Args:
547 491 m2: the manifest to which this manifest should be compared.
548 492 clean: if true, include files unchanged between these manifests
549 493 with a None value in the returned dictionary.
550 494
551 495 The result is returned as a dict with filename as key and
552 496 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
553 497 nodeid in the current/other manifest and fl1/fl2 is the flag
554 498 in the current/other manifest. Where the file does not exist,
555 499 the nodeid will be None and the flags will be the empty
556 500 string.
557 501 '''
558 502 if match:
559 503 m1 = self.matches(match)
560 504 m2 = m2.matches(match)
561 505 return m1.diff(m2, clean=clean)
562 506 return self._lm.diff(m2._lm, clean)
563 507
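A small sketch of the result shape described in the docstring; the nodes are made-up values:

    m1 = manifestdict('a\x00' + '1' * 40 + '\n')
    m2 = manifestdict('a\x00' + '2' * 40 + '\n' +
                      'b\x00' + '3' * 40 + '\n')
    d = m1.diff(m2)
    # d['a'] == ((bin('1' * 40), ''), (bin('2' * 40), ''))  # changed
    # d['b'] == ((None, ''), (bin('3' * 40), ''))           # only in m2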
564 508 def setflag(self, key, flag):
565 509 self._lm[key] = self[key], flag
566 510
567 511 def get(self, key, default=None):
568 512 try:
569 513 return self._lm[key][0]
570 514 except KeyError:
571 515 return default
572 516
573 517 def flags(self, key, default=''):
574 518 try:
575 519 return self._lm[key][1]
576 520 except KeyError:
577 521 return default
578 522
579 523 def copy(self):
580 524 c = manifestdict()
581 525 c._lm = self._lm.copy()
582 526 return c
583 527
584 528 def items(self):
585 529 return (x[:2] for x in self._lm.iterentries())
586 530
587 531 iteritems = items
588 532
589 533 def iterentries(self):
590 534 return self._lm.iterentries()
591 535
592 def text(self, usemanifestv2=False):
593 if usemanifestv2:
594 return _textv2(self._lm.iterentries())
595 else:
596 # use (probably) native version for v1
597 return self._lm.text()
536 def text(self):
537 # most likely uses native version
538 return self._lm.text()
598 539
599 540 def fastdelta(self, base, changes):
600 541 """Given a base manifest text as a bytearray and a list of changes
601 542 relative to that text, compute a delta that can be used by revlog.
602 543 """
603 544 delta = []
604 545 dstart = None
605 546 dend = None
606 547 dline = [""]
607 548 start = 0
608 549 # zero copy representation of base as a buffer
609 550 addbuf = util.buffer(base)
610 551
611 552 changes = list(changes)
612 553 if len(changes) < 1000:
613 554 # start with a readonly loop that finds the offset of
614 555 # each line and creates the deltas
615 556 for f, todelete in changes:
616 557 # bs will either be the index of the item or the insert point
617 558 start, end = _msearch(addbuf, f, start)
618 559 if not todelete:
619 560 h, fl = self._lm[f]
620 561 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
621 562 else:
622 563 if start == end:
623 564 # item we want to delete was not found, error out
624 565 raise AssertionError(
625 566 _("failed to remove %s from manifest") % f)
626 567 l = ""
627 568 if dstart is not None and dstart <= start and dend >= start:
628 569 if dend < end:
629 570 dend = end
630 571 if l:
631 572 dline.append(l)
632 573 else:
633 574 if dstart is not None:
634 575 delta.append([dstart, dend, "".join(dline)])
635 576 dstart = start
636 577 dend = end
637 578 dline = [l]
638 579
639 580 if dstart is not None:
640 581 delta.append([dstart, dend, "".join(dline)])
641 582 # apply the delta to the base, and get a delta for addrevision
642 583 deltatext, arraytext = _addlistdelta(base, delta)
643 584 else:
644 585 # For large changes, it's much cheaper to just build the text and
645 586 # diff it.
646 587 arraytext = bytearray(self.text())
647 588 deltatext = mdiff.textdiff(
648 589 util.buffer(base), util.buffer(arraytext))
649 590
650 591 return arraytext, deltatext
651 592
652 593 def _msearch(m, s, lo=0, hi=None):
653 594 '''return a tuple (start, end) that says where to find s within m.
654 595
655 596 If the string is found m[start:end] are the line containing
656 597 that string. If start == end the string was not found and
657 598 they indicate the proper sorted insertion point.
658 599
659 600 m should be a buffer, a memoryview or a byte string.
660 601 s is a byte string'''
661 602 def advance(i, c):
662 603 while i < lenm and m[i:i + 1] != c:
663 604 i += 1
664 605 return i
665 606 if not s:
666 607 return (lo, lo)
667 608 lenm = len(m)
668 609 if not hi:
669 610 hi = lenm
670 611 while lo < hi:
671 612 mid = (lo + hi) // 2
672 613 start = mid
673 614 while start > 0 and m[start - 1:start] != '\n':
674 615 start -= 1
675 616 end = advance(start, '\0')
676 617 if bytes(m[start:end]) < s:
677 618 # we know that after the null there are 40 bytes of sha1
678 619 # this translates to the bisect lo = mid + 1
679 620 lo = advance(end + 40, '\n') + 1
680 621 else:
681 622 # this translates to the bisect hi = mid
682 623 hi = start
683 624 end = advance(lo, '\0')
684 625 found = m[lo:end]
685 626 if s == found:
686 627 # we know that after the null there are 40 bytes of sha1
687 628 end = advance(end + 40, '\n')
688 629 return (lo, end + 1)
689 630 else:
690 631 return (lo, lo)
691 632
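A usage sketch (buffer contents made up): for a present key the returned slice is the full line; for a missing key, start == end gives the sorted insertion point:

    m = 'a\x00' + '1' * 40 + '\n' + 'c\x00' + '2' * 40 + '\n'
    _msearch(m, 'a')  # -> (0, 43): m[0:43] is the complete 'a' line
    _msearch(m, 'b')  # -> (43, 43): not found, insert before the 'c' line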
692 633 def _checkforbidden(l):
693 634 """Check filenames for illegal characters."""
694 635 for f in l:
695 636 if '\n' in f or '\r' in f:
696 637 raise error.RevlogError(
697 638 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
698 639
699 640
700 641 # apply the changes collected during the bisect loop to our addlist
701 642 # return a delta suitable for addrevision
702 643 def _addlistdelta(addlist, x):
703 644 # for large addlist arrays, building a new array is cheaper
704 645 # than repeatedly modifying the existing one
705 646 currentposition = 0
706 647 newaddlist = bytearray()
707 648
708 649 for start, end, content in x:
709 650 newaddlist += addlist[currentposition:start]
710 651 if content:
711 652 newaddlist += bytearray(content)
712 653
713 654 currentposition = end
714 655
715 656 newaddlist += addlist[currentposition:]
716 657
717 658 deltatext = "".join(struct.pack(">lll", start, end, len(content))
718 659 + content for start, end, content in x)
719 660 return deltatext, newaddlist
720 661
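For reference, each chunk in deltatext is a 12-byte big-endian (start, end, length) header followed by the replacement bytes; a minimal decoder sketch of that framing:

    import struct

    def iterdeltachunks(deltatext):
        pos = 0
        while pos < len(deltatext):
            start, end, length = struct.unpack('>lll', deltatext[pos:pos + 12])
            yield start, end, deltatext[pos + 12:pos + 12 + length]
            pos += 12 + length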
721 662 def _splittopdir(f):
722 663 if '/' in f:
723 664 dir, subpath = f.split('/', 1)
724 665 return dir + '/', subpath
725 666 else:
726 667 return '', f
727 668
728 669 _noop = lambda s: None
729 670
730 671 class treemanifest(object):
731 672 def __init__(self, dir='', text=''):
732 673 self._dir = dir
733 674 self._node = revlog.nullid
734 675 self._loadfunc = _noop
735 676 self._copyfunc = _noop
736 677 self._dirty = False
737 678 self._dirs = {}
738 679 # Using _lazymanifest here is a little slower than plain old dicts
739 680 self._files = {}
740 681 self._flags = {}
741 682 if text:
742 683 def readsubtree(subdir, subm):
743 684 raise AssertionError('treemanifest constructor only accepts '
744 685 'flat manifests')
745 686 self.parse(text, readsubtree)
746 687 self._dirty = True # Mark flat manifest dirty after parsing
747 688
748 689 def _subpath(self, path):
749 690 return self._dir + path
750 691
751 692 def __len__(self):
752 693 self._load()
753 694 size = len(self._files)
754 695 for m in self._dirs.values():
755 696 size += m.__len__()
756 697 return size
757 698
758 699 def __nonzero__(self):
759 700 # Faster than "__len__() != 0" since it avoids loading sub-manifests
760 701 return not self._isempty()
761 702
762 703 __bool__ = __nonzero__
763 704
764 705 def _isempty(self):
765 706 self._load() # for consistency; already loaded by all callers
766 707 return (not self._files and (not self._dirs or
767 708 all(m._isempty() for m in self._dirs.values())))
768 709
769 710 def __repr__(self):
770 711 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
771 712 (self._dir, revlog.hex(self._node),
772 713 bool(self._loadfunc is _noop),
773 714 self._dirty, id(self)))
774 715
775 716 def dir(self):
776 717 '''The directory that this tree manifest represents, including a
777 718 trailing '/'. Empty string for the repo root directory.'''
778 719 return self._dir
779 720
780 721 def node(self):
781 722 '''The node of this instance. nullid for unsaved instances. Should
782 723 be updated when the instance is read or written from a revlog.
783 724 '''
784 725 assert not self._dirty
785 726 return self._node
786 727
787 728 def setnode(self, node):
788 729 self._node = node
789 730 self._dirty = False
790 731
791 732 def iterentries(self):
792 733 self._load()
793 734 for p, n in sorted(itertools.chain(self._dirs.items(),
794 735 self._files.items())):
795 736 if p in self._files:
796 737 yield self._subpath(p), n, self._flags.get(p, '')
797 738 else:
798 739 for x in n.iterentries():
799 740 yield x
800 741
801 742 def items(self):
802 743 self._load()
803 744 for p, n in sorted(itertools.chain(self._dirs.items(),
804 745 self._files.items())):
805 746 if p in self._files:
806 747 yield self._subpath(p), n
807 748 else:
808 749 for f, sn in n.iteritems():
809 750 yield f, sn
810 751
811 752 iteritems = items
812 753
813 754 def iterkeys(self):
814 755 self._load()
815 756 for p in sorted(itertools.chain(self._dirs, self._files)):
816 757 if p in self._files:
817 758 yield self._subpath(p)
818 759 else:
819 760 for f in self._dirs[p]:
820 761 yield f
821 762
822 763 def keys(self):
823 764 return list(self.iterkeys())
824 765
825 766 def __iter__(self):
826 767 return self.iterkeys()
827 768
828 769 def __contains__(self, f):
829 770 if f is None:
830 771 return False
831 772 self._load()
832 773 dir, subpath = _splittopdir(f)
833 774 if dir:
834 775 if dir not in self._dirs:
835 776 return False
836 777 return self._dirs[dir].__contains__(subpath)
837 778 else:
838 779 return f in self._files
839 780
840 781 def get(self, f, default=None):
841 782 self._load()
842 783 dir, subpath = _splittopdir(f)
843 784 if dir:
844 785 if dir not in self._dirs:
845 786 return default
846 787 return self._dirs[dir].get(subpath, default)
847 788 else:
848 789 return self._files.get(f, default)
849 790
850 791 def __getitem__(self, f):
851 792 self._load()
852 793 dir, subpath = _splittopdir(f)
853 794 if dir:
854 795 return self._dirs[dir].__getitem__(subpath)
855 796 else:
856 797 return self._files[f]
857 798
858 799 def flags(self, f):
859 800 self._load()
860 801 dir, subpath = _splittopdir(f)
861 802 if dir:
862 803 if dir not in self._dirs:
863 804 return ''
864 805 return self._dirs[dir].flags(subpath)
865 806 else:
866 807 if f in self._dirs:
867 808 return ''
868 809 return self._flags.get(f, '')
869 810
870 811 def find(self, f):
871 812 self._load()
872 813 dir, subpath = _splittopdir(f)
873 814 if dir:
874 815 return self._dirs[dir].find(subpath)
875 816 else:
876 817 return self._files[f], self._flags.get(f, '')
877 818
878 819 def __delitem__(self, f):
879 820 self._load()
880 821 dir, subpath = _splittopdir(f)
881 822 if dir:
882 823 self._dirs[dir].__delitem__(subpath)
883 824 # If the directory is now empty, remove it
884 825 if self._dirs[dir]._isempty():
885 826 del self._dirs[dir]
886 827 else:
887 828 del self._files[f]
888 829 if f in self._flags:
889 830 del self._flags[f]
890 831 self._dirty = True
891 832
892 833 def __setitem__(self, f, n):
893 834 assert n is not None
894 835 self._load()
895 836 dir, subpath = _splittopdir(f)
896 837 if dir:
897 838 if dir not in self._dirs:
898 839 self._dirs[dir] = treemanifest(self._subpath(dir))
899 840 self._dirs[dir].__setitem__(subpath, n)
900 841 else:
901 842 self._files[f] = n[:21] # to match manifestdict's behavior
902 843 self._dirty = True
903 844
904 845 def _load(self):
905 846 if self._loadfunc is not _noop:
906 847 lf, self._loadfunc = self._loadfunc, _noop
907 848 lf(self)
908 849 elif self._copyfunc is not _noop:
909 850 cf, self._copyfunc = self._copyfunc, _noop
910 851 cf(self)
911 852
912 853 def setflag(self, f, flags):
913 854 """Set the flags (symlink, executable) for path f."""
914 855 self._load()
915 856 dir, subpath = _splittopdir(f)
916 857 if dir:
917 858 if dir not in self._dirs:
918 859 self._dirs[dir] = treemanifest(self._subpath(dir))
919 860 self._dirs[dir].setflag(subpath, flags)
920 861 else:
921 862 self._flags[f] = flags
922 863 self._dirty = True
923 864
924 865 def copy(self):
925 866 copy = treemanifest(self._dir)
926 867 copy._node = self._node
927 868 copy._dirty = self._dirty
928 869 if self._copyfunc is _noop:
929 870 def _copyfunc(s):
930 871 self._load()
931 872 for d in self._dirs:
932 873 s._dirs[d] = self._dirs[d].copy()
933 874 s._files = dict.copy(self._files)
934 875 s._flags = dict.copy(self._flags)
935 876 if self._loadfunc is _noop:
936 877 _copyfunc(copy)
937 878 else:
938 879 copy._copyfunc = _copyfunc
939 880 else:
940 881 copy._copyfunc = self._copyfunc
941 882 return copy
942 883
943 884 def filesnotin(self, m2, match=None):
944 885 '''Set of files in this manifest that are not in the other'''
945 886 if match:
946 887 m1 = self.matches(match)
947 888 m2 = m2.matches(match)
948 889 return m1.filesnotin(m2)
949 890
950 891 files = set()
951 892 def _filesnotin(t1, t2):
952 893 if t1._node == t2._node and not t1._dirty and not t2._dirty:
953 894 return
954 895 t1._load()
955 896 t2._load()
956 897 for d, m1 in t1._dirs.iteritems():
957 898 if d in t2._dirs:
958 899 m2 = t2._dirs[d]
959 900 _filesnotin(m1, m2)
960 901 else:
961 902 files.update(m1.iterkeys())
962 903
963 904 for fn in t1._files:
964 905 if fn not in t2._files:
965 906 files.add(t1._subpath(fn))
966 907
967 908 _filesnotin(self, m2)
968 909 return files
969 910
970 911 @propertycache
971 912 def _alldirs(self):
972 913 return util.dirs(self)
973 914
974 915 def dirs(self):
975 916 return self._alldirs
976 917
977 918 def hasdir(self, dir):
978 919 self._load()
979 920 topdir, subdir = _splittopdir(dir)
980 921 if topdir:
981 922 if topdir in self._dirs:
982 923 return self._dirs[topdir].hasdir(subdir)
983 924 return False
984 925 return (dir + '/') in self._dirs
985 926
986 927 def walk(self, match):
987 928 '''Generates matching file names.
988 929
989 930 Equivalent to manifest.matches(match).iterkeys(), but without creating
990 931 an entirely new manifest.
991 932
992 933 It also reports nonexistent files by marking them bad with match.bad().
993 934 '''
994 935 if match.always():
995 936 for f in iter(self):
996 937 yield f
997 938 return
998 939
999 940 fset = set(match.files())
1000 941
1001 942 for fn in self._walk(match):
1002 943 if fn in fset:
1003 944 # specified pattern is the exact name
1004 945 fset.remove(fn)
1005 946 yield fn
1006 947
1007 948 # for dirstate.walk, files=['.'] means "walk the whole tree".
1008 949 # follow that here, too
1009 950 fset.discard('.')
1010 951
1011 952 for fn in sorted(fset):
1012 953 if not self.hasdir(fn):
1013 954 match.bad(fn, None)
1014 955
1015 956 def _walk(self, match):
1016 957 '''Recursively generates matching file names for walk().'''
1017 958 if not match.visitdir(self._dir[:-1] or '.'):
1018 959 return
1019 960
1020 961 # yield this dir's files and walk its submanifests
1021 962 self._load()
1022 963 for p in sorted(list(self._dirs) + list(self._files)):
1023 964 if p in self._files:
1024 965 fullp = self._subpath(p)
1025 966 if match(fullp):
1026 967 yield fullp
1027 968 else:
1028 969 for f in self._dirs[p]._walk(match):
1029 970 yield f
1030 971
1031 972 def matches(self, match):
1032 973 '''generate a new manifest filtered by the match argument'''
1033 974 if match.always():
1034 975 return self.copy()
1035 976
1036 977 return self._matches(match)
1037 978
1038 979 def _matches(self, match):
1039 980 '''recursively generate a new manifest filtered by the match argument.
1040 981 '''
1041 982
1042 983 visit = match.visitdir(self._dir[:-1] or '.')
1043 984 if visit == 'all':
1044 985 return self.copy()
1045 986 ret = treemanifest(self._dir)
1046 987 if not visit:
1047 988 return ret
1048 989
1049 990 self._load()
1050 991 for fn in self._files:
1051 992 fullp = self._subpath(fn)
1052 993 if not match(fullp):
1053 994 continue
1054 995 ret._files[fn] = self._files[fn]
1055 996 if fn in self._flags:
1056 997 ret._flags[fn] = self._flags[fn]
1057 998
1058 999 for dir, subm in self._dirs.iteritems():
1059 1000 m = subm._matches(match)
1060 1001 if not m._isempty():
1061 1002 ret._dirs[dir] = m
1062 1003
1063 1004 if not ret._isempty():
1064 1005 ret._dirty = True
1065 1006 return ret
1066 1007
1067 1008 def diff(self, m2, match=None, clean=False):
1068 1009 '''Finds changes between the current manifest and m2.
1069 1010
1070 1011 Args:
1071 1012 m2: the manifest to which this manifest should be compared.
1072 1013 clean: if true, include files unchanged between these manifests
1073 1014 with a None value in the returned dictionary.
1074 1015
1075 1016 The result is returned as a dict with filename as key and
1076 1017 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
1077 1018 nodeid in the current/other manifest and fl1/fl2 is the flag
1078 1019 in the current/other manifest. Where the file does not exist,
1079 1020 the nodeid will be None and the flags will be the empty
1080 1021 string.
1081 1022 '''
1082 1023 if match:
1083 1024 m1 = self.matches(match)
1084 1025 m2 = m2.matches(match)
1085 1026 return m1.diff(m2, clean=clean)
1086 1027 result = {}
1087 1028 emptytree = treemanifest()
1088 1029 def _diff(t1, t2):
1089 1030 if t1._node == t2._node and not t1._dirty and not t2._dirty:
1090 1031 return
1091 1032 t1._load()
1092 1033 t2._load()
1093 1034 for d, m1 in t1._dirs.iteritems():
1094 1035 m2 = t2._dirs.get(d, emptytree)
1095 1036 _diff(m1, m2)
1096 1037
1097 1038 for d, m2 in t2._dirs.iteritems():
1098 1039 if d not in t1._dirs:
1099 1040 _diff(emptytree, m2)
1100 1041
1101 1042 for fn, n1 in t1._files.iteritems():
1102 1043 fl1 = t1._flags.get(fn, '')
1103 1044 n2 = t2._files.get(fn, None)
1104 1045 fl2 = t2._flags.get(fn, '')
1105 1046 if n1 != n2 or fl1 != fl2:
1106 1047 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
1107 1048 elif clean:
1108 1049 result[t1._subpath(fn)] = None
1109 1050
1110 1051 for fn, n2 in t2._files.iteritems():
1111 1052 if fn not in t1._files:
1112 1053 fl2 = t2._flags.get(fn, '')
1113 1054 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
1114 1055
1115 1056 _diff(self, m2)
1116 1057 return result
1117 1058
1118 1059 def unmodifiedsince(self, m2):
1119 1060 return not self._dirty and not m2._dirty and self._node == m2._node
1120 1061
1121 1062 def parse(self, text, readsubtree):
1122 1063 for f, n, fl in _parse(text):
1123 1064 if fl == 't':
1124 1065 f = f + '/'
1125 1066 self._dirs[f] = readsubtree(self._subpath(f), n)
1126 1067 elif '/' in f:
1127 1068 # This is a flat manifest, so use __setitem__ and setflag rather
1128 1069 # than assigning directly to _files and _flags, so we can
1129 1070 # assign a path in a subdirectory, and to mark dirty (compared
1130 1071 # to nullid).
1131 1072 self[f] = n
1132 1073 if fl:
1133 1074 self.setflag(f, fl)
1134 1075 else:
1135 1076 # Assigning to _files and _flags avoids marking as dirty,
1136 1077 # and should be a little faster.
1137 1078 self._files[f] = n
1138 1079 if fl:
1139 1080 self._flags[f] = fl
1140 1081
1141 def text(self, usemanifestv2=False):
1082 def text(self):
1142 1083 """Get the full data of this manifest as a bytestring."""
1143 1084 self._load()
1144 return _text(self.iterentries(), usemanifestv2)
1085 return _text(self.iterentries())
1145 1086
1146 def dirtext(self, usemanifestv2=False):
1087 def dirtext(self):
1147 1088 """Get the full data of this directory as a bytestring. Make sure that
1148 1089 any submanifests have been written first, so their nodeids are correct.
1149 1090 """
1150 1091 self._load()
1151 1092 flags = self.flags
1152 1093 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
1153 1094 files = [(f, self._files[f], flags(f)) for f in self._files]
1154 return _text(sorted(dirs + files), usemanifestv2)
1095 return _text(sorted(dirs + files))
1155 1096
1156 1097 def read(self, gettext, readsubtree):
1157 1098 def _load_for_read(s):
1158 1099 s.parse(gettext(), readsubtree)
1159 1100 s._dirty = False
1160 1101 self._loadfunc = _load_for_read
1161 1102
1162 1103 def writesubtrees(self, m1, m2, writesubtree):
1163 1104 self._load() # for consistency; should never have any effect here
1164 1105 m1._load()
1165 1106 m2._load()
1166 1107 emptytree = treemanifest()
1167 1108 for d, subm in self._dirs.iteritems():
1168 1109 subp1 = m1._dirs.get(d, emptytree)._node
1169 1110 subp2 = m2._dirs.get(d, emptytree)._node
1170 1111 if subp1 == revlog.nullid:
1171 1112 subp1, subp2 = subp2, subp1
1172 1113 writesubtree(subm, subp1, subp2)
1173 1114
1174 1115 def walksubtrees(self, matcher=None):
1175 1116 """Returns an iterator of the subtrees of this manifest, including this
1176 1117 manifest itself.
1177 1118
1178 1119 If `matcher` is provided, it only returns subtrees that match.
1179 1120 """
1180 1121 if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
1181 1122 return
1182 1123 if not matcher or matcher(self._dir[:-1]):
1183 1124 yield self
1184 1125
1185 1126 self._load()
1186 1127 for d, subm in self._dirs.iteritems():
1187 1128 for subtree in subm.walksubtrees(matcher=matcher):
1188 1129 yield subtree
1189 1130
1190 1131 class manifestrevlog(revlog.revlog):
1191 1132 '''A revlog that stores manifest texts. This is responsible for caching the
1192 1133 full-text manifest contents.
1193 1134 '''
1194 1135 def __init__(self, opener, dir='', dirlogcache=None, indexfile=None,
1195 1136 treemanifest=False):
1196 1137 """Constructs a new manifest revlog
1197 1138
1198 1139 `indexfile` - used by extensions to have two manifests at once, like
1199 1140 when transitioning between flat manifests and treemanifests.
1200 1141
1201 1142 `treemanifest` - used to indicate this is a tree manifest revlog. Opener
1202 1143 options can also be used to make this a tree manifest revlog. The opener
1203 1144 option takes precedence, so if it is set to True, we ignore whatever
1204 1145 value is passed in to the constructor.
1205 1146 """
1206 1147 # During normal operations, we expect to deal with not more than four
1207 1148 # revs at a time (such as during commit --amend). When rebasing large
1208 1149 # stacks of commits, the number can go up, hence the config knob below.
1209 1150 cachesize = 4
1210 1151 optiontreemanifest = False
1211 usemanifestv2 = False
1212 1152 opts = getattr(opener, 'options', None)
1213 1153 if opts is not None:
1214 1154 cachesize = opts.get('manifestcachesize', cachesize)
1215 1155 optiontreemanifest = opts.get('treemanifest', False)
1216 usemanifestv2 = opts.get('manifestv2', usemanifestv2)
1217 1156
1218 1157 self._treeondisk = optiontreemanifest or treemanifest
1219 self._usemanifestv2 = usemanifestv2
1220 1158
1221 1159 self._fulltextcache = util.lrucachedict(cachesize)
1222 1160
1223 1161 if dir:
1224 1162 assert self._treeondisk, 'opts is %r' % opts
1225 1163 if not dir.endswith('/'):
1226 1164 dir = dir + '/'
1227 1165
1228 1166 if indexfile is None:
1229 1167 indexfile = '00manifest.i'
1230 1168 if dir:
1231 1169 indexfile = "meta/" + dir + indexfile
1232 1170
1233 1171 self._dir = dir
1234 1172 # The dirlogcache is kept on the root manifest log
1235 1173 if dir:
1236 1174 self._dirlogcache = dirlogcache
1237 1175 else:
1238 1176 self._dirlogcache = {'': self}
1239 1177
1240 1178 super(manifestrevlog, self).__init__(opener, indexfile,
1241 1179 # only root indexfile is cached
1242 1180 checkambig=not bool(dir),
1243 1181 mmaplargeindex=True)
1244 1182
1245 1183 @property
1246 1184 def fulltextcache(self):
1247 1185 return self._fulltextcache
1248 1186
1249 1187 def clearcaches(self):
1250 1188 super(manifestrevlog, self).clearcaches()
1251 1189 self._fulltextcache.clear()
1252 1190 self._dirlogcache = {'': self}
1253 1191
1254 1192 def dirlog(self, d):
1255 1193 if d:
1256 1194 assert self._treeondisk
1257 1195 if d not in self._dirlogcache:
1258 1196 mfrevlog = manifestrevlog(self.opener, d,
1259 1197 self._dirlogcache,
1260 1198 treemanifest=self._treeondisk)
1261 1199 self._dirlogcache[d] = mfrevlog
1262 1200 return self._dirlogcache[d]
1263 1201
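# Annotation: dirlog() memoizes per-directory revlogs in _dirlogcache,
# which only the root manifestrevlog owns, so repeated lookups hand back
# the identical object. Sketch with an assumed tree manifest revlog `mrl`:
#
#   assert mrl.dirlog('a/b/') is mrl.dirlog('a/b/')
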
1264 1202 def add(self, m, transaction, link, p1, p2, added, removed, readtree=None):
1265 if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
1266 and not self._usemanifestv2):
1203 if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
1267 1204 # If our first parent is in the manifest cache, we can
1268 1205 # compute a delta here using properties we know about the
1269 1206 # manifest up-front, which may save time later for the
1270 1207 # revlog layer.
1271 1208
1272 1209 _checkforbidden(added)
1273 1210 # combine the changed lists into one sorted iterator
1274 1211 work = heapq.merge([(x, False) for x in added],
1275 1212 [(x, True) for x in removed])
1276 1213
1277 1214 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1278 1215 cachedelta = self.rev(p1), deltatext
1279 1216 text = util.buffer(arraytext)
1280 1217 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
1281 1218 else:
1282 1219 # The first parent manifest isn't already loaded, so we'll
1283 1220 # just encode a fulltext of the manifest and pass that
1284 1221 # through to the revlog layer, and let it handle the delta
1285 1222 # process.
1286 1223 if self._treeondisk:
1287 1224 assert readtree, "readtree must be set for treemanifest writes"
1288 1225 m1 = readtree(self._dir, p1)
1289 1226 m2 = readtree(self._dir, p2)
1290 1227 n = self._addtree(m, transaction, link, m1, m2, readtree)
1291 1228 arraytext = None
1292 1229 else:
1293 text = m.text(self._usemanifestv2)
1230 text = m.text()
1294 1231 n = self.addrevision(text, transaction, link, p1, p2)
1295 1232 arraytext = bytearray(text)
1296 1233
1297 1234 if arraytext is not None:
1298 1235 self.fulltextcache[n] = arraytext
1299 1236
1300 1237 return n
1301 1238
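# Annotation: the fastdelta path above hands m.fastdelta() one sorted
# stream of (path, isremoved) pairs; heapq.merge interleaves correctly
# only because `added` and `removed` are each already sorted. Standalone
# sketch with hypothetical paths:
#
#   import heapq
#   added = [b'a.txt', b'm.txt']
#   removed = [b'b.txt', b'z.txt']
#   list(heapq.merge([(x, False) for x in added],
#                    [(x, True) for x in removed]))
#   # -> [(b'a.txt', False), (b'b.txt', True),
#   #     (b'm.txt', False), (b'z.txt', True)]
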
1302 1239 def _addtree(self, m, transaction, link, m1, m2, readtree):
1303 1240 # If the manifest is unchanged compared to one parent,
1304 1241 # don't write a new revision
1305 1242 if self._dir != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(m2)):
1306 1243 return m.node()
1307 1244 def writesubtree(subm, subp1, subp2):
1308 1245 sublog = self.dirlog(subm.dir())
1309 1246 sublog.add(subm, transaction, link, subp1, subp2, None, None,
1310 1247 readtree=readtree)
1311 1248 m.writesubtrees(m1, m2, writesubtree)
1312 text = m.dirtext(self._usemanifestv2)
1249 text = m.dirtext()
1313 1250 n = None
1314 1251 if self._dir != '':
1315 1252 # Double-check whether contents are unchanged compared to one parent
1316 if text == m1.dirtext(self._usemanifestv2):
1253 if text == m1.dirtext():
1317 1254 n = m1.node()
1318 elif text == m2.dirtext(self._usemanifestv2):
1255 elif text == m2.dirtext():
1319 1256 n = m2.node()
1320 1257
1321 1258 if not n:
1322 1259 n = self.addrevision(text, transaction, link, m1.node(), m2.node())
1323 1260
1324 1261 # Save nodeid so parent manifest can calculate its nodeid
1325 1262 m.setnode(n)
1326 1263 return n
1327 1264
1328 1265 class manifestlog(object):
1329 1266 """A collection class representing the collection of manifest snapshots
1330 1267 referenced by commits in the repository.
1331 1268
1332 1269 In this situation, 'manifest' refers to the abstract concept of a snapshot
1333 1270 of the list of files in the given commit. Consumers of the output of this
1334 1271 class do not care about the implementation details of the actual manifests
1335 1272 they receive (e.g. tree or flat or lazily loaded, etc)."""
1336 1273 def __init__(self, opener, repo):
1337 1274 usetreemanifest = False
1338 1275 cachesize = 4
1339 1276
1340 1277 opts = getattr(opener, 'options', None)
1341 1278 if opts is not None:
1342 1279 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1343 1280 cachesize = opts.get('manifestcachesize', cachesize)
1344 1281 self._treeinmem = usetreemanifest
1345 1282
1346 1283 self._revlog = repo._constructmanifest()
1347 1284
1348 1285 # A cache of the manifestctx or treemanifestctx for each directory
1349 1286 self._dirmancache = {}
1350 1287 self._dirmancache[''] = util.lrucachedict(cachesize)
1351 1288
1352 1289 self.cachesize = cachesize
1353 1290
1354 1291 def __getitem__(self, node):
1355 1292 """Retrieves the manifest instance for the given node. Throws a
1356 1293 LookupError if not found.
1357 1294 """
1358 1295 return self.get('', node)
1359 1296
1360 1297 def get(self, dir, node, verify=True):
1361 1298 """Retrieves the manifest instance for the given node. Throws a
1362 1299 LookupError if not found.
1363 1300
1364 1301 `verify` - if True an exception will be thrown if the node is not in
1365 1302 the revlog
1366 1303 """
1367 1304 if node in self._dirmancache.get(dir, ()):
1368 1305 return self._dirmancache[dir][node]
1369 1306
1370 1307 if dir:
1371 1308 if self._revlog._treeondisk:
1372 1309 if verify:
1373 1310 dirlog = self._revlog.dirlog(dir)
1374 1311 if node not in dirlog.nodemap:
1375 1312 raise LookupError(node, dirlog.indexfile,
1376 1313 _('no node'))
1377 1314 m = treemanifestctx(self, dir, node)
1378 1315 else:
1379 1316 raise error.Abort(
1380 1317 _("cannot ask for manifest directory '%s' in a flat "
1381 1318 "manifest") % dir)
1382 1319 else:
1383 1320 if verify:
1384 1321 if node not in self._revlog.nodemap:
1385 1322 raise LookupError(node, self._revlog.indexfile,
1386 1323 _('no node'))
1387 1324 if self._treeinmem:
1388 1325 m = treemanifestctx(self, '', node)
1389 1326 else:
1390 1327 m = manifestctx(self, node)
1391 1328
1392 1329 if node != revlog.nullid:
1393 1330 mancache = self._dirmancache.get(dir)
1394 1331 if not mancache:
1395 1332 mancache = util.lrucachedict(self.cachesize)
1396 1333 self._dirmancache[dir] = mancache
1397 1334 mancache[node] = m
1398 1335 return m
1399 1336
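# Usage annotation (hypothetical objects): manifestlog is the normal read
# entry point; `repo`, `ctx` and `subnode` are assumed to exist already.
#
#   mfl = repo.manifestlog
#   mctx = mfl[ctx.manifestnode()]   # root manifest; same as get('', node)
#   mf = mctx.read()                 # manifestdict or treemanifest
#   # tree repos only; verify=False skips the nodemap existence check
#   # (`subnode` would be the nodeid of the 'a/' subtree):
#   sub = mfl.get('a/', subnode, verify=False)
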
1400 1337 def clearcaches(self):
1401 1338 self._dirmancache.clear()
1402 1339 self._revlog.clearcaches()
1403 1340
1404 1341 class memmanifestctx(object):
1405 1342 def __init__(self, manifestlog):
1406 1343 self._manifestlog = manifestlog
1407 1344 self._manifestdict = manifestdict()
1408 1345
1409 1346 def _revlog(self):
1410 1347 return self._manifestlog._revlog
1411 1348
1412 1349 def new(self):
1413 1350 return memmanifestctx(self._manifestlog)
1414 1351
1415 1352 def copy(self):
1416 1353 memmf = memmanifestctx(self._manifestlog)
1417 1354 memmf._manifestdict = self.read().copy()
1418 1355 return memmf
1419 1356
1420 1357 def read(self):
1421 1358 return self._manifestdict
1422 1359
1423 1360 def write(self, transaction, link, p1, p2, added, removed):
1424 1361 return self._revlog().add(self._manifestdict, transaction, link, p1, p2,
1425 1362 added, removed)
1426 1363
1427 1364 class manifestctx(object):
1428 1365 """A class representing a single revision of a manifest, including its
1429 1366 contents, its parent revs, and its linkrev.
1430 1367 """
1431 1368 def __init__(self, manifestlog, node):
1432 1369 self._manifestlog = manifestlog
1433 1370 self._data = None
1434 1371
1435 1372 self._node = node
1436 1373
1437 1374 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
1438 1375 # but let's add it later when something needs it and we can load it
1439 1376 # lazily.
1440 1377 #self.p1, self.p2 = revlog.parents(node)
1441 1378 #rev = revlog.rev(node)
1442 1379 #self.linkrev = revlog.linkrev(rev)
1443 1380
1444 1381 def _revlog(self):
1445 1382 return self._manifestlog._revlog
1446 1383
1447 1384 def node(self):
1448 1385 return self._node
1449 1386
1450 1387 def new(self):
1451 1388 return memmanifestctx(self._manifestlog)
1452 1389
1453 1390 def copy(self):
1454 1391 memmf = memmanifestctx(self._manifestlog)
1455 1392 memmf._manifestdict = self.read().copy()
1456 1393 return memmf
1457 1394
1458 1395 @propertycache
1459 1396 def parents(self):
1460 1397 return self._revlog().parents(self._node)
1461 1398
1462 1399 def read(self):
1463 1400 if self._data is None:
1464 1401 if self._node == revlog.nullid:
1465 1402 self._data = manifestdict()
1466 1403 else:
1467 1404 rl = self._revlog()
1468 1405 text = rl.revision(self._node)
1469 1406 arraytext = bytearray(text)
1470 1407 rl._fulltextcache[self._node] = arraytext
1471 1408 self._data = manifestdict(text)
1472 1409 return self._data
1473 1410
1474 1411 def readfast(self, shallow=False):
1475 1412 '''Calls either readdelta or read, based on which would be less work.
1476 1413 readdelta is called if the delta is against the p1, and therefore can be
1477 1414 read quickly.
1478 1415
1479 1416 If `shallow` is True, nothing changes since this is a flat manifest.
1480 1417 '''
1481 1418 rl = self._revlog()
1482 1419 r = rl.rev(self._node)
1483 1420 deltaparent = rl.deltaparent(r)
1484 1421 if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
1485 1422 return self.readdelta()
1486 1423 return self.read()
1487 1424
1488 1425 def readdelta(self, shallow=False):
1489 1426 '''Returns a manifest containing just the entries that are present
1490 1427 in this manifest, but not in its p1 manifest. This is efficient to read
1491 1428 if the revlog delta is already p1.
1492 1429
1493 1430 Changing the value of `shallow` has no effect on flat manifests.
1494 1431 '''
1495 1432 revlog = self._revlog()
1496 if revlog._usemanifestv2:
1497 # Need to perform a slow delta
1498 r0 = revlog.deltaparent(revlog.rev(self._node))
1499 m0 = self._manifestlog[revlog.node(r0)].read()
1500 m1 = self.read()
1501 md = manifestdict()
1502 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1503 if n1:
1504 md[f] = n1
1505 if fl1:
1506 md.setflag(f, fl1)
1507 return md
1508
1509 1433 r = revlog.rev(self._node)
1510 1434 d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
1511 1435 return manifestdict(d)
1512 1436
1513 1437 def find(self, key):
1514 1438 return self.read().find(key)
1515 1439
1516 1440 class memtreemanifestctx(object):
1517 1441 def __init__(self, manifestlog, dir=''):
1518 1442 self._manifestlog = manifestlog
1519 1443 self._dir = dir
1520 1444 self._treemanifest = treemanifest()
1521 1445
1522 1446 def _revlog(self):
1523 1447 return self._manifestlog._revlog
1524 1448
1525 1449 def new(self, dir=''):
1526 1450 return memtreemanifestctx(self._manifestlog, dir=dir)
1527 1451
1528 1452 def copy(self):
1529 1453 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
1530 1454 memmf._treemanifest = self._treemanifest.copy()
1531 1455 return memmf
1532 1456
1533 1457 def read(self):
1534 1458 return self._treemanifest
1535 1459
1536 1460 def write(self, transaction, link, p1, p2, added, removed):
1537 1461 def readtree(dir, node):
1538 1462 return self._manifestlog.get(dir, node).read()
1539 1463 return self._revlog().add(self._treemanifest, transaction, link, p1, p2,
1540 1464 added, removed, readtree=readtree)
1541 1465
1542 1466 class treemanifestctx(object):
1543 1467 def __init__(self, manifestlog, dir, node):
1544 1468 self._manifestlog = manifestlog
1545 1469 self._dir = dir
1546 1470 self._data = None
1547 1471
1548 1472 self._node = node
1549 1473
1550 1474 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
1551 1475 # we can instantiate treemanifestctx objects for directories we don't
1552 1476 # have on disk.
1553 1477 #self.p1, self.p2 = revlog.parents(node)
1554 1478 #rev = revlog.rev(node)
1555 1479 #self.linkrev = revlog.linkrev(rev)
1556 1480
1557 1481 def _revlog(self):
1558 1482 return self._manifestlog._revlog.dirlog(self._dir)
1559 1483
1560 1484 def read(self):
1561 1485 if self._data is None:
1562 1486 rl = self._revlog()
1563 1487 if self._node == revlog.nullid:
1564 1488 self._data = treemanifest()
1565 1489 elif rl._treeondisk:
1566 1490 m = treemanifest(dir=self._dir)
1567 1491 def gettext():
1568 1492 return rl.revision(self._node)
1569 1493 def readsubtree(dir, subm):
1570 1494 # Set verify to False since we need to be able to create
1571 1495 # subtrees for trees that don't exist on disk.
1572 1496 return self._manifestlog.get(dir, subm, verify=False).read()
1573 1497 m.read(gettext, readsubtree)
1574 1498 m.setnode(self._node)
1575 1499 self._data = m
1576 1500 else:
1577 1501 text = rl.revision(self._node)
1578 1502 arraytext = bytearray(text)
1579 1503 rl.fulltextcache[self._node] = arraytext
1580 1504 self._data = treemanifest(dir=self._dir, text=text)
1581 1505
1582 1506 return self._data
1583 1507
1584 1508 def node(self):
1585 1509 return self._node
1586 1510
1587 1511 def new(self, dir=''):
1588 1512 return memtreemanifestctx(self._manifestlog, dir=dir)
1589 1513
1590 1514 def copy(self):
1591 1515 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
1592 1516 memmf._treemanifest = self.read().copy()
1593 1517 return memmf
1594 1518
1595 1519 @propertycache
1596 1520 def parents(self):
1597 1521 return self._revlog().parents(self._node)
1598 1522
1599 1523 def readdelta(self, shallow=False):
1600 1524 '''Returns a manifest containing just the entries that are present
1601 1525 in this manifest, but not in its p1 manifest. This is efficient to read
1602 1526 if the revlog delta is already p1.
1603 1527
1604 1528 If `shallow` is True, this will read the delta for this directory,
1605 1529 without recursively reading subdirectory manifests. Instead, any
1606 1530 subdirectory entry will be reported as it appears in the manifest, i.e.
1607 1531 the subdirectory will be reported among files and distinguished only by
1608 1532 its 't' flag.
1609 1533 '''
1610 1534 revlog = self._revlog()
1611 if shallow and not revlog._usemanifestv2:
1535 if shallow:
1612 1536 r = revlog.rev(self._node)
1613 1537 d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
1614 1538 return manifestdict(d)
1615 1539 else:
1616 1540 # Need to perform a slow delta
1617 1541 r0 = revlog.deltaparent(revlog.rev(self._node))
1618 1542 m0 = self._manifestlog.get(self._dir, revlog.node(r0)).read()
1619 1543 m1 = self.read()
1620 1544 md = treemanifest(dir=self._dir)
1621 1545 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1622 1546 if n1:
1623 1547 md[f] = n1
1624 1548 if fl1:
1625 1549 md.setflag(f, fl1)
1626 1550 return md
1627 1551
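# Annotation: the slow-delta branch above materializes m0.diff(m1); each
# value has the shape ((oldnode, oldflag), (newnode, newflag)), and entries
# with a true `n1` exist (or changed) in this manifest. Standalone sketch
# using flat manifestdicts and hypothetical hashes:
#
#   from mercurial import manifest as manifestmod
#   m0 = manifestmod.manifestdict(b'foo\x00' + b'1' * 40 + b'\n')
#   m1 = manifestmod.manifestdict(b'foo\x00' + b'f' * 40 + b'l\n')
#   m0.diff(m1)
#   # -> {b'foo': ((<20-byte old node>, b''), (<20-byte new node>, b'l'))}
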
1628 1552 def readfast(self, shallow=False):
1629 1553 '''Calls either readdelta or read, based on which would be less work.
1630 1554 readdelta is called if the delta is against the p1, and therefore can be
1631 1555 read quickly.
1632 1556
1633 1557 If `shallow` is True, it only returns the entries from this manifest,
1634 1558 and not any submanifests.
1635 1559 '''
1636 1560 rl = self._revlog()
1637 1561 r = rl.rev(self._node)
1638 1562 deltaparent = rl.deltaparent(r)
1639 1563 if (deltaparent != revlog.nullrev and
1640 1564 deltaparent in rl.parentrevs(r)):
1641 1565 return self.readdelta(shallow=shallow)
1642 1566
1643 1567 if shallow:
1644 1568 return manifestdict(rl.revision(self._node))
1645 1569 else:
1646 1570 return self.read()
1647 1571
1648 1572 def find(self, key):
1649 1573 return self.read().find(key)
@@ -1,867 +1,866 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11 import tempfile
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 hg,
19 19 localrepo,
20 20 manifest,
21 21 revlog,
22 22 scmutil,
23 23 util,
24 24 vfs as vfsmod,
25 25 )
26 26
27 27 def requiredsourcerequirements(repo):
28 28 """Obtain requirements required to be present to upgrade a repo.
29 29
30 30 An upgrade will not be allowed if the repository doesn't have the
31 31 requirements returned by this function.
32 32 """
33 33 return {
34 34 # Introduced in Mercurial 0.9.2.
35 35 'revlogv1',
36 36 # Introduced in Mercurial 0.9.2.
37 37 'store',
38 38 }
39 39
40 40 def blocksourcerequirements(repo):
41 41 """Obtain requirements that will prevent an upgrade from occurring.
42 42
43 43 An upgrade cannot be performed if the source repository contains a
44 44 requirement in the returned set.
45 45 """
46 46 return {
47 47 # The upgrade code does not yet support these experimental features.
48 48 # This is an artificial limitation.
49 'manifestv2',
50 49 'treemanifest',
51 50 # This was a precursor to generaldelta and was never enabled by default.
52 51 # It should (hopefully) not exist in the wild.
53 52 'parentdelta',
54 53 # Upgrade should operate on the actual store, not the shared link.
55 54 'shared',
56 55 }
57 56
58 57 def supportremovedrequirements(repo):
59 58 """Obtain requirements that can be removed during an upgrade.
60 59
61 60 If an upgrade were to create a repository that dropped a requirement,
62 61 the dropped requirement must appear in the returned set for the upgrade
63 62 to be allowed.
64 63 """
65 64 return set()
66 65
67 66 def supporteddestrequirements(repo):
68 67 """Obtain requirements that upgrade supports in the destination.
69 68
70 69 If the result of the upgrade would create requirements not in this set,
71 70 the upgrade is disallowed.
72 71
73 72 Extensions should monkeypatch this to add their custom requirements.
74 73 """
75 74 return {
76 75 'dotencode',
77 76 'fncache',
78 77 'generaldelta',
79 78 'revlogv1',
80 79 'store',
81 80 }
82 81
83 82 def allowednewrequirements(repo):
84 83 """Obtain requirements that can be added to a repository during upgrade.
85 84
86 85 This is used to disallow proposed requirements from being added when
87 86 they weren't present before.
88 87
89 88 We use a list of allowed requirement additions instead of a list of known
90 89 bad additions because the whitelist approach is safer and will prevent
91 90 future, unknown requirements from accidentally being added.
92 91 """
93 92 return {
94 93 'dotencode',
95 94 'fncache',
96 95 'generaldelta',
97 96 }
98 97
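# Annotation: upgraderepo() below vets a proposed requirements set with
# plain set arithmetic against the whitelists above. Hypothetical example:
#
#   current = {'revlogv1', 'store', 'fncache', 'dotencode'}
#   proposed = current | {'generaldelta'}
#   proposed - current  # {'generaldelta'}: must be in allowednewrequirements()
#   current - proposed  # set(): nothing needs supportremovedrequirements()
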
99 98 def preservedrequirements(repo):
100 99 return set()
101 100
102 101 deficiency = 'deficiency'
103 102 optimisation = 'optimization'
104 103
105 104 class improvement(object):
106 105 """Represents an improvement that can be made as part of an upgrade.
107 106
108 107 The following attributes are defined on each instance:
109 108
110 109 name
111 110 Machine-readable string uniquely identifying this improvement. It
112 111 will be mapped to an action later in the upgrade process.
113 112
114 113 type
115 114 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
116 115 problem. An optimization is an action (sometimes optional) that
117 116 can be taken to further improve the state of the repository.
118 117
119 118 description
120 119 Message intended for humans explaining the improvement in more detail,
121 120 including the implications of it. For ``deficiency`` types, should be
122 121 worded in the present tense. For ``optimisation`` types, should be
123 122 worded in the future tense.
124 123
125 124 upgrademessage
126 125 Message intended for humans explaining what an upgrade addressing this
127 126 issue will do. Should be worded in the future tense.
128 127 """
129 128 def __init__(self, name, type, description, upgrademessage):
130 129 self.name = name
131 130 self.type = type
132 131 self.description = description
133 132 self.upgrademessage = upgrademessage
134 133
135 134 def __eq__(self, other):
136 135 if not isinstance(other, improvement):
137 136 # This is what Python tells us to do
138 137 return NotImplemented
139 138 return self.name == other.name
140 139
141 140 def __ne__(self, other):
142 141 return not self == other
143 142
144 143 def __hash__(self):
145 144 return hash(self.name)
146 145
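# Annotation: improvements compare and hash by name alone, so an
# optimisation requested via --optimize deduplicates against one already
# scheduled by determineactions(). Sketch with hypothetical descriptions:
#
#   a = improvement('redeltaparent', optimisation, 'desc', 'message')
#   b = improvement('redeltaparent', optimisation, 'other', 'other message')
#   assert a == b and hash(a) == hash(b)
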
147 146 allformatvariant = []
148 147
149 148 def registerformatvariant(cls):
150 149 allformatvariant.append(cls)
151 150 return cls
152 151
153 152 class formatvariant(improvement):
154 153 """an improvement subclass dedicated to repository format"""
155 154 type = deficiency
156 155 ### The following attributes should be defined for each class:
157 156
158 157 # machine-readable string uniquely identifying this improvement. it will be
159 158 # mapped to an action later in the upgrade process.
160 159 name = None
161 160
162 161 # message intended for humans explaining the improvement in more detail,
163 162 # including the implications of it. For ``deficiency`` types, should be worded
164 163 # in the present tense.
165 164 description = None
166 165
167 166 # message intended for humans explaining what an upgrade addressing this
168 167 # issue will do. should be worded in the future tense.
169 168 upgrademessage = None
170 169
171 170 # value of current Mercurial default for new repository
172 171 default = None
173 172
174 173 def __init__(self):
175 174 raise NotImplementedError()
176 175
177 176 @staticmethod
178 177 def fromrepo(repo):
179 178 """current value of the variant in the repository"""
180 179 raise NotImplementedError()
181 180
182 181 @staticmethod
183 182 def fromconfig(repo):
184 183 """current value of the variant in the configuration"""
185 184 raise NotImplementedError()
186 185
187 186 class requirementformatvariant(formatvariant):
188 187 """formatvariant based on a 'requirement' name.
189 188
190 189 Many format variants are controlled by a 'requirement'. We define a small
191 190 subclass to factor out the common code.
192 191 """
193 192
194 193 # the requirement that controls this format variant
195 194 _requirement = None
196 195
197 196 @staticmethod
198 197 def _newreporequirements(repo):
199 198 return localrepo.newreporequirements(repo)
200 199
201 200 @classmethod
202 201 def fromrepo(cls, repo):
203 202 assert cls._requirement is not None
204 203 return cls._requirement in repo.requirements
205 204
206 205 @classmethod
207 206 def fromconfig(cls, repo):
208 207 assert cls._requirement is not None
209 208 return cls._requirement in cls._newreporequirements(repo)
210 209
211 210 @registerformatvariant
212 211 class fncache(requirementformatvariant):
213 212 name = 'fncache'
214 213
215 214 _requirement = 'fncache'
216 215
217 216 default = True
218 217
219 218 description = _('long and reserved filenames may not work correctly; '
220 219 'repository performance is sub-optimal')
221 220
222 221 upgrademessage = _('repository will be more resilient to storing '
223 222 'certain paths and performance of certain '
224 223 'operations should be improved')
225 224
226 225 @registerformatvariant
227 226 class dotencode(requirementformatvariant):
228 227 name = 'dotencode'
229 228
230 229 _requirement = 'dotencode'
231 230
232 231 default = True
233 232
234 233 description = _('storage of filenames beginning with a period or '
235 234 'space may not work correctly')
236 235
237 236 upgrademessage = _('repository will be better able to store files '
238 237 'beginning with a space or period')
239 238
240 239 @registerformatvariant
241 240 class generaldelta(requirementformatvariant):
242 241 name = 'generaldelta'
243 242
244 243 _requirement = 'generaldelta'
245 244
246 245 default = True
247 246
248 247 description = _('deltas within internal storage are unable to '
249 248 'choose optimal revisions; repository is larger and '
250 249 'slower than it could be; interaction with other '
251 250 'repositories may require extra network and CPU '
252 251 'resources, making "hg push" and "hg pull" slower')
253 252
254 253 upgrademessage = _('repository storage will be able to create '
255 254 'optimal deltas; new repository data will be '
256 255 'smaller and read times should decrease; '
257 256 'interacting with other repositories using this '
258 257 'storage model should require less network and '
259 258 'CPU resources, making "hg push" and "hg pull" '
260 259 'faster')
261 260
262 261 @registerformatvariant
263 262 class removecldeltachain(formatvariant):
264 263 name = 'plain-cl-delta'
265 264
266 265 default = True
267 266
268 267 description = _('changelog storage is using deltas instead of '
269 268 'raw entries; changelog reading and any '
270 269 'operation relying on changelog data are slower '
271 270 'than they could be')
272 271
273 272 upgrademessage = _('changelog storage will be reformatted to
274 273 'store raw entries; changelog reading will be '
275 274 'faster; changelog size may be reduced')
276 275
277 276 @staticmethod
278 277 def fromrepo(repo):
279 278 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
280 279 # changelogs with deltas.
281 280 cl = repo.changelog
282 281 chainbase = cl.chainbase
283 282 return all(rev == chainbase(rev) for rev in cl)
284 283
285 284 @staticmethod
286 285 def fromconfig(repo):
287 286 return True
288 287
289 288 @registerformatvariant
290 289 class compressionengine(formatvariant):
291 290 name = 'compression'
292 291 default = 'zlib'
293 292
294 293 description = _('Compression algorithm used to compress data. '
295 294 'Some engines are faster than others')
296 295
297 296 upgrademessage = _('revlog content will be recompressed with the new '
298 297 'algorithm.')
299 298
300 299 @classmethod
301 300 def fromrepo(cls, repo):
302 301 for req in repo.requirements:
303 302 if req.startswith('exp-compression-'):
304 303 return req.split('-', 2)[2]
305 304 return 'zlib'
306 305
307 306 @classmethod
308 307 def fromconfig(cls, repo):
309 308 return repo.ui.config('experimental', 'format.compression')
310 309
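# Annotation: the requirement entry embeds the engine name after the
# second dash, so split('-', 2) recovers it:
#
#   'exp-compression-zstd'.split('-', 2)[2]   # -> 'zstd'
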
311 310 def finddeficiencies(repo):
312 311 """returns a list of deficiencies that the repo suffer from"""
313 312 deficiencies = []
314 313
315 314 # We could detect lack of revlogv1 and store here, but they were added
316 315 # in 0.9.2 and we don't support upgrading repos without these
317 316 # requirements, so let's not bother.
318 317
319 318 for fv in allformatvariant:
320 319 if not fv.fromrepo(repo):
321 320 deficiencies.append(fv)
322 321
323 322 return deficiencies
324 323
325 324 def findoptimizations(repo):
326 325 """Determine optimisation that could be used during upgrade"""
327 326 # These are unconditionally added. There is logic later that figures out
328 327 # which ones to apply.
329 328 optimizations = []
330 329
331 330 optimizations.append(improvement(
332 331 name='redeltaparent',
333 332 type=optimisation,
334 333 description=_('deltas within internal storage will be recalculated to '
335 334 'choose an optimal base revision where this was not '
336 335 'already done; the size of the repository may shrink and '
337 336 'various operations may become faster; the first time '
338 337 'this optimization is performed could slow down upgrade '
339 338 'execution considerably; subsequent invocations should '
340 339 'not run noticeably slower'),
341 340 upgrademessage=_('deltas within internal storage will choose a new '
342 341 'base revision if needed')))
343 342
344 343 optimizations.append(improvement(
345 344 name='redeltamultibase',
346 345 type=optimisation,
347 346 description=_('deltas within internal storage will be recalculated '
348 347 'against multiple base revision and the smallest '
349 348 'difference will be used; the size of the repository may '
350 349 'shrink significantly when there are many merges; this '
351 350 'optimization will slow down execution in proportion to '
352 351 'the number of merges in the repository and the number '
353 352 'of files in the repository; this slowdown should not '
354 353 'be significant unless there are tens of thousands of '
355 354 'files and thousands of merges'),
356 355 upgrademessage=_('deltas within internal storage will choose an '
357 356 'optimal delta by computing deltas against multiple '
358 357 'parents; may slow down execution time '
359 358 'significantly')))
360 359
361 360 optimizations.append(improvement(
362 361 name='redeltaall',
363 362 type=optimisation,
364 363 description=_('deltas within internal storage will always be '
365 364 'recalculated without reusing prior deltas; this will '
366 365 'likely make execution run several times slower; this '
367 366 'optimization is typically not needed'),
368 367 upgrademessage=_('deltas within internal storage will be fully '
369 368 'recomputed; this will likely drastically slow down '
370 369 'execution time')))
371 370
372 371 optimizations.append(improvement(
373 372 name='redeltafulladd',
374 373 type=optimisation,
375 374 description=_('every revision will be re-added as if it were new '
376 375 'content. It will go through the full storage '
377 376 'mechanism giving extensions a chance to process it '
378 377 '(e.g. lfs). This is similar to "redeltaall" but even '
379 378 'slower since more logic is involved.'),
380 379 upgrademessage=_('each revision will be added as new content to the '
381 380 'internal storage; this will likely drastically slow '
382 381 'down execution time, but some extensions might need '
383 382 'it')))
384 383
385 384 return optimizations
386 385
387 386 def determineactions(repo, deficiencies, sourcereqs, destreqs):
388 387 """Determine upgrade actions that will be performed.
389 388
390 389 Given a list of improvements as returned by ``finddeficiencies`` and
391 390 ``findoptimizations``, determine the list of upgrade actions that
392 391 will be performed.
393 392
394 393 The role of this function is to filter improvements if needed, apply
395 394 recommended optimizations from the improvements list that make sense,
396 395 etc.
397 396
398 397 Returns a list of action names.
399 398 """
400 399 newactions = []
401 400
402 401 knownreqs = supporteddestrequirements(repo)
403 402
404 403 for d in deficiencies:
405 404 name = d.name
406 405
407 406 # If the action is a requirement that doesn't show up in the
408 407 # destination requirements, prune the action.
409 408 if name in knownreqs and name not in destreqs:
410 409 continue
411 410
412 411 newactions.append(d)
413 412
414 413 # FUTURE consider adding some optimizations here for certain transitions.
415 414 # e.g. adding generaldelta could schedule parent redeltas.
416 415
417 416 return newactions
418 417
419 418 def _revlogfrompath(repo, path):
420 419 """Obtain a revlog from a repo path.
421 420
422 421 An instance of the appropriate class is returned.
423 422 """
424 423 if path == '00changelog.i':
425 424 return changelog.changelog(repo.svfs)
426 425 elif path.endswith('00manifest.i'):
427 426 mandir = path[:-len('00manifest.i')]
428 427 return manifest.manifestrevlog(repo.svfs, dir=mandir)
429 428 else:
430 429 #reverse of "/".join(("data", path + ".i"))
431 430 return filelog.filelog(repo.svfs, path[5:-2])
432 431
433 432 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
434 433 """Copy revlogs between 2 repos."""
435 434 revcount = 0
436 435 srcsize = 0
437 436 srcrawsize = 0
438 437 dstsize = 0
439 438 fcount = 0
440 439 frevcount = 0
441 440 fsrcsize = 0
442 441 frawsize = 0
443 442 fdstsize = 0
444 443 mcount = 0
445 444 mrevcount = 0
446 445 msrcsize = 0
447 446 mrawsize = 0
448 447 mdstsize = 0
449 448 crevcount = 0
450 449 csrcsize = 0
451 450 crawsize = 0
452 451 cdstsize = 0
453 452
454 453 # Perform a pass to collect metadata. This validates we can open all
455 454 # source files and allows a unified progress bar to be displayed.
456 455 for unencoded, encoded, size in srcrepo.store.walk():
457 456 if unencoded.endswith('.d'):
458 457 continue
459 458
460 459 rl = _revlogfrompath(srcrepo, unencoded)
461 460 revcount += len(rl)
462 461
463 462 datasize = 0
464 463 rawsize = 0
465 464 idx = rl.index
466 465 for rev in rl:
467 466 e = idx[rev]
468 467 datasize += e[1]
469 468 rawsize += e[2]
470 469
471 470 srcsize += datasize
472 471 srcrawsize += rawsize
473 472
474 473 # This is for the separate progress bars.
475 474 if isinstance(rl, changelog.changelog):
476 475 crevcount += len(rl)
477 476 csrcsize += datasize
478 477 crawsize += rawsize
479 478 elif isinstance(rl, manifest.manifestrevlog):
480 479 mcount += 1
481 480 mrevcount += len(rl)
482 481 msrcsize += datasize
483 482 mrawsize += rawsize
484 483 elif isinstance(rl, revlog.revlog):
485 484 fcount += 1
486 485 frevcount += len(rl)
487 486 fsrcsize += datasize
488 487 frawsize += rawsize
489 488
490 489 if not revcount:
491 490 return
492 491
493 492 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
494 493 '%d in changelog)\n') %
495 494 (revcount, frevcount, mrevcount, crevcount))
496 495 ui.write(_('migrating %s in store; %s tracked data\n') % (
497 496 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
498 497
499 498 # Used to keep track of progress.
500 499 progress = []
501 500 def oncopiedrevision(rl, rev, node):
502 501 progress[1] += 1
503 502 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
504 503
505 504 # Do the actual copying.
506 505 # FUTURE this operation can be farmed off to worker processes.
507 506 seen = set()
508 507 for unencoded, encoded, size in srcrepo.store.walk():
509 508 if unencoded.endswith('.d'):
510 509 continue
511 510
512 511 oldrl = _revlogfrompath(srcrepo, unencoded)
513 512 newrl = _revlogfrompath(dstrepo, unencoded)
514 513
515 514 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
516 515 ui.write(_('finished migrating %d manifest revisions across %d '
517 516 'manifests; change in size: %s\n') %
518 517 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
519 518
520 519 ui.write(_('migrating changelog containing %d revisions '
521 520 '(%s in store; %s tracked data)\n') %
522 521 (crevcount, util.bytecount(csrcsize),
523 522 util.bytecount(crawsize)))
524 523 seen.add('c')
525 524 progress[:] = [_('changelog revisions'), 0, crevcount]
526 525 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
527 526 ui.write(_('finished migrating %d filelog revisions across %d '
528 527 'filelogs; change in size: %s\n') %
529 528 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
530 529
531 530 ui.write(_('migrating %d manifests containing %d revisions '
532 531 '(%s in store; %s tracked data)\n') %
533 532 (mcount, mrevcount, util.bytecount(msrcsize),
534 533 util.bytecount(mrawsize)))
535 534 seen.add('m')
536 535 progress[:] = [_('manifest revisions'), 0, mrevcount]
537 536 elif 'f' not in seen:
538 537 ui.write(_('migrating %d filelogs containing %d revisions '
539 538 '(%s in store; %s tracked data)\n') %
540 539 (fcount, frevcount, util.bytecount(fsrcsize),
541 540 util.bytecount(frawsize)))
542 541 seen.add('f')
543 542 progress[:] = [_('file revisions'), 0, frevcount]
544 543
545 544 ui.progress(progress[0], progress[1], total=progress[2])
546 545
547 546 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
548 547 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
549 548 deltareuse=deltareuse,
550 549 aggressivemergedeltas=aggressivemergedeltas)
551 550
552 551 datasize = 0
553 552 idx = newrl.index
554 553 for rev in newrl:
555 554 datasize += idx[rev][1]
556 555
557 556 dstsize += datasize
558 557
559 558 if isinstance(newrl, changelog.changelog):
560 559 cdstsize += datasize
561 560 elif isinstance(newrl, manifest.manifestrevlog):
562 561 mdstsize += datasize
563 562 else:
564 563 fdstsize += datasize
565 564
566 565 ui.progress(progress[0], None)
567 566
568 567 ui.write(_('finished migrating %d changelog revisions; change in size: '
569 568 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
570 569
571 570 ui.write(_('finished migrating %d total revisions; total change in store '
572 571 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
573 572
574 573 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
575 574 """Determine whether to copy a store file during upgrade.
576 575
577 576 This function is called when migrating store files from ``srcrepo`` to
578 577 ``dstrepo`` as part of upgrading a repository.
579 578
580 579 Args:
581 580 srcrepo: repo we are copying from
582 581 dstrepo: repo we are copying to
583 582 requirements: set of requirements for ``dstrepo``
584 583 path: store file being examined
585 584 mode: the ``ST_MODE`` file type of ``path``
586 585 st: ``stat`` data structure for ``path``
587 586
588 587 Function should return ``True`` if the file is to be copied.
589 588 """
590 589 # Skip revlogs.
591 590 if path.endswith(('.i', '.d')):
592 591 return False
593 592 # Skip transaction related files.
594 593 if path.startswith('undo'):
595 594 return False
596 595 # Only copy regular files.
597 596 if mode != stat.S_IFREG:
598 597 return False
599 598 # Skip other skipped files.
600 599 if path in ('lock', 'fncache'):
601 600 return False
602 601
603 602 return True
604 603
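# Annotation: illustrative calls against the filter above; the repo and
# stat arguments are unused by the current checks, so None stands in, and
# the paths are hypothetical:
#
#   import stat
#   _filterstorefile(None, None, set(), 'phaseroots', stat.S_IFREG, None)
#   # -> True: plain store file, copied
#   _filterstorefile(None, None, set(), '00changelog.i', stat.S_IFREG, None)
#   # -> False: revlogs are migrated separately, not copied
#   _filterstorefile(None, None, set(), 'undo.bundle', stat.S_IFREG, None)
#   # -> False: transaction leftovers are skipped
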
605 604 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
606 605 """Hook point for extensions to perform additional actions during upgrade.
607 606
608 607 This function is called after revlogs and store files have been copied but
609 608 before the new store is swapped into the original location.
610 609 """
611 610
612 611 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
613 612 """Do the low-level work of upgrading a repository.
614 613
615 614 The upgrade is effectively performed as a copy between a source
616 615 repository and a temporary destination repository.
617 616
618 617 The source repository is unmodified for as long as possible so the
619 618 upgrade can abort at any time without causing loss of service for
620 619 readers and without corrupting the source repository.
621 620 """
622 621 assert srcrepo.currentwlock()
623 622 assert dstrepo.currentwlock()
624 623
625 624 ui.write(_('(it is safe to interrupt this process any time before '
626 625 'data migration completes)\n'))
627 626
628 627 if 'redeltaall' in actions:
629 628 deltareuse = revlog.revlog.DELTAREUSENEVER
630 629 elif 'redeltaparent' in actions:
631 630 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
632 631 elif 'redeltamultibase' in actions:
633 632 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
634 633 elif 'redeltafulladd' in actions:
635 634 deltareuse = revlog.revlog.DELTAREUSEFULLADD
636 635 else:
637 636 deltareuse = revlog.revlog.DELTAREUSEALWAYS
638 637
639 638 with dstrepo.transaction('upgrade') as tr:
640 639 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
641 640 'redeltamultibase' in actions)
642 641
643 642 # Now copy other files in the store directory.
644 643 # The sorted() makes execution deterministic.
645 644 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
646 645 if not _filterstorefile(srcrepo, dstrepo, requirements,
647 646 p, kind, st):
648 647 continue
649 648
650 649 srcrepo.ui.write(_('copying %s\n') % p)
651 650 src = srcrepo.store.rawvfs.join(p)
652 651 dst = dstrepo.store.rawvfs.join(p)
653 652 util.copyfile(src, dst, copystat=True)
654 653
655 654 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
656 655
657 656 ui.write(_('data fully migrated to temporary repository\n'))
658 657
659 658 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
660 659 backupvfs = vfsmod.vfs(backuppath)
661 660
662 661 # Make a backup of requires file first, as it is the first to be modified.
663 662 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
664 663
665 664 # We install an arbitrary requirement that clients must not support
666 665 # as a mechanism to lock out new clients during the data swap. This is
667 666 # better than allowing a client to continue while the repository is in
668 667 # an inconsistent state.
669 668 ui.write(_('marking source repository as being upgraded; clients will be '
670 669 'unable to read from repository\n'))
671 670 scmutil.writerequires(srcrepo.vfs,
672 671 srcrepo.requirements | {'upgradeinprogress'})
673 672
674 673 ui.write(_('starting in-place swap of repository data\n'))
675 674 ui.write(_('replaced files will be backed up at %s\n') %
676 675 backuppath)
677 676
678 677 # Now swap in the new store directory. Doing it as a rename should make
679 678 # the operation nearly instantaneous and atomic (at least in well-behaved
680 679 # environments).
681 680 ui.write(_('replacing store...\n'))
682 681 tstart = util.timer()
683 682 util.rename(srcrepo.spath, backupvfs.join('store'))
684 683 util.rename(dstrepo.spath, srcrepo.spath)
685 684 elapsed = util.timer() - tstart
686 685 ui.write(_('store replacement complete; repository was inconsistent for '
687 686 '%0.1fs\n') % elapsed)
688 687
689 688 # We first write the requirements file. Any new requirements will lock
690 689 # out legacy clients.
691 690 ui.write(_('finalizing requirements file and making repository readable '
692 691 'again\n'))
693 692 scmutil.writerequires(srcrepo.vfs, requirements)
694 693
695 694 # The lock file from the old store won't be removed because nothing has a
696 695 # reference to its new location. So clean it up manually. Alternatively, we
697 696 # could update srcrepo.svfs and other variables to point to the new
698 697 # location. This is simpler.
699 698 backupvfs.unlink('store/lock')
700 699
701 700 return backuppath
702 701
703 702 def upgraderepo(ui, repo, run=False, optimize=None):
704 703 """Upgrade a repository in place."""
705 704 optimize = set(optimize or [])
706 705 repo = repo.unfiltered()
707 706
708 707 # Ensure the repository can be upgraded.
709 708 missingreqs = requiredsourcerequirements(repo) - repo.requirements
710 709 if missingreqs:
711 710 raise error.Abort(_('cannot upgrade repository; requirement '
712 711 'missing: %s') % _(', ').join(sorted(missingreqs)))
713 712
714 713 blockedreqs = blocksourcerequirements(repo) & repo.requirements
715 714 if blockedreqs:
716 715 raise error.Abort(_('cannot upgrade repository; unsupported source '
717 716 'requirement: %s') %
718 717 _(', ').join(sorted(blockedreqs)))
719 718
720 719 # FUTURE there is potentially a need to control the wanted requirements via
721 720 # command arguments or via an extension hook point.
722 721 newreqs = localrepo.newreporequirements(repo)
723 722 newreqs.update(preservedrequirements(repo))
724 723
725 724 noremovereqs = (repo.requirements - newreqs -
726 725 supportremovedrequirements(repo))
727 726 if noremovereqs:
728 727 raise error.Abort(_('cannot upgrade repository; requirement would be '
729 728 'removed: %s') % _(', ').join(sorted(noremovereqs)))
730 729
731 730 noaddreqs = (newreqs - repo.requirements -
732 731 allowednewrequirements(repo))
733 732 if noaddreqs:
734 733 raise error.Abort(_('cannot upgrade repository; do not support adding '
735 734 'requirement: %s') %
736 735 _(', ').join(sorted(noaddreqs)))
737 736
738 737 unsupportedreqs = newreqs - supporteddestrequirements(repo)
739 738 if unsupportedreqs:
740 739 raise error.Abort(_('cannot upgrade repository; do not support '
741 740 'destination requirement: %s') %
742 741 _(', ').join(sorted(unsupportedreqs)))
743 742
744 743 # Find and validate all improvements that can be made.
745 744 alloptimizations = findoptimizations(repo)
746 745
747 746 # Apply and Validate arguments.
748 747 optimizations = []
749 748 for o in alloptimizations:
750 749 if o.name in optimize:
751 750 optimizations.append(o)
752 751 optimize.discard(o.name)
753 752
754 753 if optimize: # anything left is unknown
755 754 raise error.Abort(_('unknown optimization action requested: %s') %
756 755 ', '.join(sorted(optimize)),
757 756 hint=_('run without arguments to see valid '
758 757 'optimizations'))
759 758
760 759 deficiencies = finddeficiencies(repo)
761 760 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
762 761 actions.extend(o for o in sorted(optimizations)
763 762 # determineactions could have added optimisations
764 763 if o not in actions)
765 764
766 765 def printrequirements():
767 766 ui.write(_('requirements\n'))
768 767 ui.write(_(' preserved: %s\n') %
769 768 _(', ').join(sorted(newreqs & repo.requirements)))
770 769
771 770 if repo.requirements - newreqs:
772 771 ui.write(_(' removed: %s\n') %
773 772 _(', ').join(sorted(repo.requirements - newreqs)))
774 773
775 774 if newreqs - repo.requirements:
776 775 ui.write(_(' added: %s\n') %
777 776 _(', ').join(sorted(newreqs - repo.requirements)))
778 777
779 778 ui.write('\n')
780 779
781 780 def printupgradeactions():
782 781 for a in actions:
783 782 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
784 783
785 784 if not run:
786 785 fromconfig = []
787 786 onlydefault = []
788 787
789 788 for d in deficiencies:
790 789 if d.fromconfig(repo):
791 790 fromconfig.append(d)
792 791 elif d.default:
793 792 onlydefault.append(d)
794 793
795 794 if fromconfig or onlydefault:
796 795
797 796 if fromconfig:
798 797 ui.write(_('repository lacks features recommended by '
799 798 'current config options:\n\n'))
800 799 for i in fromconfig:
801 800 ui.write('%s\n %s\n\n' % (i.name, i.description))
802 801
803 802 if onlydefault:
804 803 ui.write(_('repository lacks features used by the default '
805 804 'config options:\n\n'))
806 805 for i in onlydefault:
807 806 ui.write('%s\n %s\n\n' % (i.name, i.description))
808 807
809 808 ui.write('\n')
810 809 else:
811 810 ui.write(_('(no feature deficiencies found in existing '
812 811 'repository)\n'))
813 812
814 813 ui.write(_('performing an upgrade with "--run" will make the following '
815 814 'changes:\n\n'))
816 815
817 816 printrequirements()
818 817 printupgradeactions()
819 818
820 819 unusedoptimize = [i for i in alloptimizations if i not in actions]
821 820
822 821 if unusedoptimize:
823 822 ui.write(_('additional optimizations are available by specifying '
824 823 '"--optimize <name>":\n\n'))
825 824 for i in unusedoptimize:
826 825 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
827 826 return
828 827
829 828 # Else we're in the run=true case.
830 829 ui.write(_('upgrade will perform the following actions:\n\n'))
831 830 printrequirements()
832 831 printupgradeactions()
833 832
834 833 upgradeactions = [a.name for a in actions]
835 834
836 835 ui.write(_('beginning upgrade...\n'))
837 836 with repo.wlock(), repo.lock():
838 837 ui.write(_('repository locked and read-only\n'))
839 838 # Our strategy for upgrading the repository is to create a new,
840 839 # temporary repository, write data to it, then do a swap of the
841 840 # data. There are less heavyweight ways to do this, but it is easier
842 841 # to create a new repo object than to instantiate all the components
843 842 # (like the store) separately.
844 843 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
845 844 backuppath = None
846 845 try:
847 846 ui.write(_('creating temporary repository to stage migrated '
848 847 'data: %s\n') % tmppath)
849 848
850 849 # clone ui without using ui.copy because repo.ui is protected
851 850 repoui = repo.ui.__class__(repo.ui)
852 851 dstrepo = hg.repository(repoui, path=tmppath, create=True)
853 852
854 853 with dstrepo.wlock(), dstrepo.lock():
855 854 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
856 855 upgradeactions)
857 856
858 857 finally:
859 858 ui.write(_('removing temporary repository %s\n') % tmppath)
860 859 repo.vfs.rmtree(tmppath, forcibly=True)
861 860
862 861 if backuppath:
863 862 ui.warn(_('copy of old repository backed up at %s\n') %
864 863 backuppath)
865 864 ui.warn(_('the old repository will not be deleted; remove '
866 865 'it to free up disk space once the upgraded '
867 866 'repository is verified\n'))
@@ -1,489 +1,422 b''
1 1 from __future__ import absolute_import
2 2
3 3 import binascii
4 4 import itertools
5 5 import silenttestrunner
6 6 import unittest
7 7
8 8 from mercurial import (
9 9 manifest as manifestmod,
10 10 match as matchmod,
11 11 )
12 12
13 13 EMTPY_MANIFEST = b''
14 EMTPY_MANIFEST_V2 = b'\0\n'
15 14
16 15 HASH_1 = b'1' * 40
17 16 BIN_HASH_1 = binascii.unhexlify(HASH_1)
18 17 HASH_2 = b'f' * 40
19 18 BIN_HASH_2 = binascii.unhexlify(HASH_2)
20 19 HASH_3 = b'1234567890abcdef0987654321deadbeef0fcafe'
21 20 BIN_HASH_3 = binascii.unhexlify(HASH_3)
22 21 A_SHORT_MANIFEST = (
23 22 b'bar/baz/qux.py\0%(hash2)s%(flag2)s\n'
24 23 b'foo\0%(hash1)s%(flag1)s\n'
25 24 ) % {b'hash1': HASH_1,
26 25 b'flag1': b'',
27 26 b'hash2': HASH_2,
28 27 b'flag2': b'l',
29 28 }
30 29
31 # Same data as A_SHORT_MANIFEST
32 A_SHORT_MANIFEST_V2 = (
33 b'\0\n'
34 b'\x00bar/baz/qux.py\0%(flag2)s\n%(hash2)s\n'
35 b'\x00foo\0%(flag1)s\n%(hash1)s\n'
36 ) % {b'hash1': BIN_HASH_1,
37 b'flag1': b'',
38 b'hash2': BIN_HASH_2,
39 b'flag2': b'l',
40 }
41
42 # Same data as A_SHORT_MANIFEST
43 A_METADATA_MANIFEST = (
44 b'\0foo\0bar\n'
45 b'\x00bar/baz/qux.py\0%(flag2)s\0foo\0bar\n%(hash2)s\n' # flag and metadata
46 b'\x00foo\0%(flag1)s\0foo\n%(hash1)s\n' # no flag, but metadata
47 ) % {b'hash1': BIN_HASH_1,
48 b'flag1': b'',
49 b'hash2': BIN_HASH_2,
50 b'flag2': b'l',
51 }
52
53 A_STEM_COMPRESSED_MANIFEST = (
54 b'\0\n'
55 b'\x00bar/baz/qux.py\0%(flag2)s\n%(hash2)s\n'
56 b'\x04qux/foo.py\0%(flag1)s\n%(hash1)s\n' # simple case of 4 stem chars
57 b'\x0az.py\0%(flag1)s\n%(hash1)s\n' # tricky newline = 10 stem characters
58 b'\x00%(verylongdir)sx/x\0\n%(hash1)s\n'
59 b'\xffx/y\0\n%(hash2)s\n' # more than 255 stem chars
60 ) % {b'hash1': BIN_HASH_1,
61 b'flag1': b'',
62 b'hash2': BIN_HASH_2,
63 b'flag2': b'l',
64 b'verylongdir': 255 * b'x',
65 }
66
67 30 A_DEEPER_MANIFEST = (
68 31 b'a/b/c/bar.py\0%(hash3)s%(flag1)s\n'
69 32 b'a/b/c/bar.txt\0%(hash1)s%(flag1)s\n'
70 33 b'a/b/c/foo.py\0%(hash3)s%(flag1)s\n'
71 34 b'a/b/c/foo.txt\0%(hash2)s%(flag2)s\n'
72 35 b'a/b/d/baz.py\0%(hash3)s%(flag1)s\n'
73 36 b'a/b/d/qux.py\0%(hash1)s%(flag2)s\n'
74 37 b'a/b/d/ten.txt\0%(hash3)s%(flag2)s\n'
75 38 b'a/b/dog.py\0%(hash3)s%(flag1)s\n'
76 39 b'a/b/fish.py\0%(hash2)s%(flag1)s\n'
77 40 b'a/c/london.py\0%(hash3)s%(flag2)s\n'
78 41 b'a/c/paper.txt\0%(hash2)s%(flag2)s\n'
79 42 b'a/c/paris.py\0%(hash2)s%(flag1)s\n'
80 43 b'a/d/apple.py\0%(hash3)s%(flag1)s\n'
81 44 b'a/d/pizza.py\0%(hash3)s%(flag2)s\n'
82 45 b'a/green.py\0%(hash1)s%(flag2)s\n'
83 46 b'a/purple.py\0%(hash2)s%(flag1)s\n'
84 47 b'app.py\0%(hash3)s%(flag1)s\n'
85 48 b'readme.txt\0%(hash2)s%(flag1)s\n'
86 49 ) % {b'hash1': HASH_1,
87 50 b'flag1': b'',
88 51 b'hash2': HASH_2,
89 52 b'flag2': b'l',
90 53 b'hash3': HASH_3,
91 54 }
92 55
93 56 HUGE_MANIFEST_ENTRIES = 200001
94 57
95 58 izip = getattr(itertools, 'izip', zip)
96 59 if 'xrange' not in globals():
97 60 xrange = range
98 61
99 62 A_HUGE_MANIFEST = b''.join(sorted(
100 63 b'file%d\0%s%s\n' % (i, h, f) for i, h, f in
101 64 izip(xrange(200001),
102 65 itertools.cycle((HASH_1, HASH_2)),
103 66 itertools.cycle((b'', b'x', b'l')))))
104 67
105 68 class basemanifesttests(object):
106 69 def parsemanifest(self, text):
107 70 raise NotImplementedError('parsemanifest not implemented by test case')
108 71
109 72 def testEmptyManifest(self):
110 73 m = self.parsemanifest(EMTPY_MANIFEST)
111 74 self.assertEqual(0, len(m))
112 75 self.assertEqual([], list(m))
113 76
114 def testEmptyManifestv2(self):
115 m = self.parsemanifest(EMTPY_MANIFEST_V2)
116 self.assertEqual(0, len(m))
117 self.assertEqual([], list(m))
118
119 77 def testManifest(self):
120 78 m = self.parsemanifest(A_SHORT_MANIFEST)
121 79 self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
122 80 self.assertEqual(BIN_HASH_2, m[b'bar/baz/qux.py'])
123 81 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
124 82 self.assertEqual(BIN_HASH_1, m[b'foo'])
125 83 self.assertEqual(b'', m.flags(b'foo'))
126 84 with self.assertRaises(KeyError):
127 85 m[b'wat']
128 86
129 def testParseManifestV2(self):
130 m1 = self.parsemanifest(A_SHORT_MANIFEST)
131 m2 = self.parsemanifest(A_SHORT_MANIFEST_V2)
132 # Should have same content as A_SHORT_MANIFEST
133 self.assertEqual(m1.text(), m2.text())
134
135 def testParseManifestMetadata(self):
136 # Metadata is for future-proofing and should be accepted but ignored
137 m = self.parsemanifest(A_METADATA_MANIFEST)
138 self.assertEqual(A_SHORT_MANIFEST, m.text())
139
140 def testParseManifestStemCompression(self):
141 m = self.parsemanifest(A_STEM_COMPRESSED_MANIFEST)
142 self.assertIn(b'bar/baz/qux.py', m)
143 self.assertIn(b'bar/qux/foo.py', m)
144 self.assertIn(b'bar/qux/foz.py', m)
145 self.assertIn(256 * b'x' + b'/x', m)
146 self.assertIn(256 * b'x' + b'/y', m)
147 self.assertEqual(A_STEM_COMPRESSED_MANIFEST, m.text(usemanifestv2=True))
148
149 def testTextV2(self):
150 m1 = self.parsemanifest(A_SHORT_MANIFEST)
151 v2text = m1.text(usemanifestv2=True)
152 self.assertEqual(A_SHORT_MANIFEST_V2, v2text)
153
154 87 def testSetItem(self):
155 88 want = BIN_HASH_1
156 89
157 90 m = self.parsemanifest(EMTPY_MANIFEST)
158 91 m[b'a'] = want
159 92 self.assertIn(b'a', m)
160 93 self.assertEqual(want, m[b'a'])
161 94 self.assertEqual(b'a\0' + HASH_1 + b'\n', m.text())
162 95
163 96 m = self.parsemanifest(A_SHORT_MANIFEST)
164 97 m[b'a'] = want
165 98 self.assertEqual(want, m[b'a'])
166 99 self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST,
167 100 m.text())
168 101
169 102 def testSetFlag(self):
170 103 want = b'x'
171 104
172 105 m = self.parsemanifest(EMTPY_MANIFEST)
173 106 # first add a file; a file-less flag makes no sense
174 107 m[b'a'] = BIN_HASH_1
175 108 m.setflag(b'a', want)
176 109 self.assertEqual(want, m.flags(b'a'))
177 110 self.assertEqual(b'a\0' + HASH_1 + want + b'\n', m.text())
178 111
179 112 m = self.parsemanifest(A_SHORT_MANIFEST)
180 113 # first add a file; a file-less flag makes no sense
181 114 m[b'a'] = BIN_HASH_1
182 115 m.setflag(b'a', want)
183 116 self.assertEqual(want, m.flags(b'a'))
184 117 self.assertEqual(b'a\0' + HASH_1 + want + b'\n' + A_SHORT_MANIFEST,
185 118 m.text())
186 119
187 120 def testCopy(self):
188 121 m = self.parsemanifest(A_SHORT_MANIFEST)
189 122 m[b'a'] = BIN_HASH_1
190 123 m2 = m.copy()
191 124 del m
192 125 del m2 # make sure we don't double free() anything
193 126
194 127 def testCompaction(self):
195 128 unhex = binascii.unhexlify
196 129 h1, h2 = unhex(HASH_1), unhex(HASH_2)
197 130 m = self.parsemanifest(A_SHORT_MANIFEST)
198 131 m[b'alpha'] = h1
199 132 m[b'beta'] = h2
200 133 del m[b'foo']
201 134 want = b'alpha\0%s\nbar/baz/qux.py\0%sl\nbeta\0%s\n' % (
202 135 HASH_1, HASH_2, HASH_2)
203 136 self.assertEqual(want, m.text())
204 137 self.assertEqual(3, len(m))
205 138 self.assertEqual([b'alpha', b'bar/baz/qux.py', b'beta'], list(m))
206 139 self.assertEqual(h1, m[b'alpha'])
207 140 self.assertEqual(h2, m[b'bar/baz/qux.py'])
208 141 self.assertEqual(h2, m[b'beta'])
209 142 self.assertEqual(b'', m.flags(b'alpha'))
210 143 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
211 144 self.assertEqual(b'', m.flags(b'beta'))
212 145 with self.assertRaises(KeyError):
213 146 m[b'foo']
214 147
215 148 def testSetGetNodeSuffix(self):
216 149 clean = self.parsemanifest(A_SHORT_MANIFEST)
217 150 m = self.parsemanifest(A_SHORT_MANIFEST)
218 151 h = m[b'foo']
219 152 f = m.flags(b'foo')
220 153 want = h + b'a'
221 154 # Merge code wants to set 21-byte fake hashes at times
222 155 m[b'foo'] = want
223 156 self.assertEqual(want, m[b'foo'])
224 157 self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
225 158 (b'foo', BIN_HASH_1 + b'a')],
226 159 list(m.items()))
227 160 # Sometimes it even tries a 22-byte fake hash, but we can
228 161 # return 21 and it'll work out
229 162 m[b'foo'] = want + b'+'
230 163 self.assertEqual(want, m[b'foo'])
231 164 # make sure the suffix survives a copy
232 165 match = matchmod.match(b'', b'', [b're:foo'])
233 166 m2 = m.matches(match)
234 167 self.assertEqual(want, m2[b'foo'])
235 168 self.assertEqual(1, len(m2))
236 169 m2 = m.copy()
237 170 self.assertEqual(want, m2[b'foo'])
238 171 # suffix with iteration
239 172 self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
240 173 (b'foo', want)],
241 174 list(m.items()))
242 175
243 176 # shows up in diff
244 177 self.assertEqual({b'foo': ((want, f), (h, b''))}, m.diff(clean))
245 178 self.assertEqual({b'foo': ((h, b''), (want, f))}, clean.diff(m))
246 179
247 180 def testMatchException(self):
248 181 m = self.parsemanifest(A_SHORT_MANIFEST)
249 182 match = matchmod.match(b'', b'', [b're:.*'])
250 183 def filt(path):
251 184 if path == b'foo':
252 185 assert False
253 186 return True
254 187 match.matchfn = filt
255 188 with self.assertRaises(AssertionError):
256 189 m.matches(match)
257 190
258 191 def testRemoveItem(self):
259 192 m = self.parsemanifest(A_SHORT_MANIFEST)
260 193 del m[b'foo']
261 194 with self.assertRaises(KeyError):
262 195 m[b'foo']
263 196 self.assertEqual(1, len(m))
264 197 self.assertEqual(1, len(list(m)))
265 198 # now restore and make sure everything works right
266 199 m[b'foo'] = b'a' * 20
267 200 self.assertEqual(2, len(m))
268 201 self.assertEqual(2, len(list(m)))
269 202
270 203 def testManifestDiff(self):
271 204 MISSING = (None, b'')
272 205 addl = b'z-only-in-left\0' + HASH_1 + b'\n'
273 206 addr = b'z-only-in-right\0' + HASH_2 + b'x\n'
274 207 left = self.parsemanifest(
275 208 A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl)
276 209 right = self.parsemanifest(A_SHORT_MANIFEST + addr)
277 210 want = {
278 211 b'foo': ((BIN_HASH_3, b'x'),
279 212 (BIN_HASH_1, b'')),
280 213 b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
281 214 b'z-only-in-right': (MISSING, (BIN_HASH_2, b'x')),
282 215 }
283 216 self.assertEqual(want, left.diff(right))
284 217
285 218 want = {
286 219 b'bar/baz/qux.py': (MISSING, (BIN_HASH_2, b'l')),
287 220 b'foo': (MISSING, (BIN_HASH_3, b'x')),
288 221 b'z-only-in-left': (MISSING, (BIN_HASH_1, b'')),
289 222 }
290 223 self.assertEqual(want, self.parsemanifest(EMTPY_MANIFEST).diff(left))
291 224
292 225 want = {
293 226 b'bar/baz/qux.py': ((BIN_HASH_2, b'l'), MISSING),
294 227 b'foo': ((BIN_HASH_3, b'x'), MISSING),
295 228 b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
296 229 }
297 230 self.assertEqual(want, left.diff(self.parsemanifest(EMTPY_MANIFEST)))
298 231 copy = right.copy()
299 232 del copy[b'z-only-in-right']
300 233 del right[b'foo']
301 234 want = {
302 235 b'foo': (MISSING, (BIN_HASH_1, b'')),
303 236 b'z-only-in-right': ((BIN_HASH_2, b'x'), MISSING),
304 237 }
305 238 self.assertEqual(want, right.diff(copy))
306 239
307 240 short = self.parsemanifest(A_SHORT_MANIFEST)
308 241 pruned = short.copy()
309 242 del pruned[b'foo']
310 243 want = {
311 244 b'foo': ((BIN_HASH_1, b''), MISSING),
312 245 }
313 246 self.assertEqual(want, short.diff(pruned))
314 247 want = {
315 248 b'foo': (MISSING, (BIN_HASH_1, b'')),
316 249 }
317 250 self.assertEqual(want, pruned.diff(short))
318 251 want = {
319 252 b'bar/baz/qux.py': None,
320 253 b'foo': (MISSING, (BIN_HASH_1, b'')),
321 254 }
322 255 self.assertEqual(want, pruned.diff(short, clean=True))
323 256
324 257 def testReversedLines(self):
325 258 backwards = b''.join(
326 259 l + b'\n' for l in reversed(A_SHORT_MANIFEST.split(b'\n')) if l)
327 260 try:
328 261 self.parsemanifest(backwards)
329 262 self.fail('Should have raised ValueError')
330 263 except ValueError as v:
331 264 self.assertIn('Manifest lines not in sorted order.', str(v))
332 265
333 266 def testNoTerminalNewline(self):
334 267 try:
335 268 self.parsemanifest(A_SHORT_MANIFEST + b'wat')
336 269 self.fail('Should have raised ValueError')
337 270 except ValueError as v:
338 271 self.assertIn('Manifest did not end in a newline.', str(v))
339 272
340 273 def testNoNewLineAtAll(self):
341 274 try:
342 275 self.parsemanifest(b'wat')
343 276 self.fail('Should have raised ValueError')
344 277 except ValueError as v:
345 278 self.assertIn('Manifest did not end in a newline.', str(v))
346 279
347 280 def testHugeManifest(self):
348 281 m = self.parsemanifest(A_HUGE_MANIFEST)
349 282 self.assertEqual(HUGE_MANIFEST_ENTRIES, len(m))
350 283 self.assertEqual(len(m), len(list(m)))
351 284
352 285 def testMatchesMetadata(self):
353 286 '''Tests matches() for a few specific files to make sure that
354 287 the set of files, as well as their flags and nodeids, is correct in
355 288 the resulting manifest.'''
356 289 m = self.parsemanifest(A_HUGE_MANIFEST)
357 290
358 291 match = matchmod.match(b'/', b'',
359 292 [b'file1', b'file200', b'file300'], exact=True)
360 293 m2 = m.matches(match)
361 294
362 295 w = (b'file1\0%sx\n'
363 296 b'file200\0%sl\n'
364 297 b'file300\0%s\n') % (HASH_2, HASH_1, HASH_1)
365 298 self.assertEqual(w, m2.text())
366 299
367 300 def testMatchesNonexistentFile(self):
368 301 '''Tests matches() for a small set of specific files, including one
369 302 nonexistent file to make sure it only matches against existing files.
370 303 '''
371 304 m = self.parsemanifest(A_DEEPER_MANIFEST)
372 305
373 306 match = matchmod.match(b'/', b'',
374 307 [b'a/b/c/bar.txt', b'a/b/d/qux.py',
375 308 b'readme.txt', b'nonexistent'],
376 309 exact=True)
377 310 m2 = m.matches(match)
378 311
379 312 self.assertEqual(
380 313 [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'],
381 314 m2.keys())
382 315
383 316 def testMatchesNonexistentDirectory(self):
384 317 '''Tests matches() for a relpath match on a directory that doesn't
385 318 actually exist.'''
386 319 m = self.parsemanifest(A_DEEPER_MANIFEST)
387 320
388 321 match = matchmod.match(b'/', b'', [b'a/f'], default=b'relpath')
389 322 m2 = m.matches(match)
390 323
391 324 self.assertEqual([], m2.keys())
392 325
393 326 def testMatchesExactLarge(self):
394 327 '''Tests matches() for files matching a large list of exact files.
395 328 '''
396 329 m = self.parsemanifest(A_HUGE_MANIFEST)
397 330
398 331 flist = m.keys()[80:300]
399 332 match = matchmod.match(b'/', b'', flist, exact=True)
400 333 m2 = m.matches(match)
401 334
402 335 self.assertEqual(flist, m2.keys())
403 336
404 337 def testMatchesFull(self):
405 338 '''Tests matches() for what should be a full match.'''
406 339 m = self.parsemanifest(A_DEEPER_MANIFEST)
407 340
408 341 match = matchmod.match(b'/', b'', [b''])
409 342 m2 = m.matches(match)
410 343
411 344 self.assertEqual(m.keys(), m2.keys())
412 345
413 346 def testMatchesDirectory(self):
414 347 '''Tests matches() on a relpath match on a directory, which should
415 348 match against all files within said directory.'''
416 349 m = self.parsemanifest(A_DEEPER_MANIFEST)
417 350
418 351 match = matchmod.match(b'/', b'', [b'a/b'], default=b'relpath')
419 352 m2 = m.matches(match)
420 353
421 354 self.assertEqual([
422 355 b'a/b/c/bar.py', b'a/b/c/bar.txt', b'a/b/c/foo.py',
423 356 b'a/b/c/foo.txt',
424 357 b'a/b/d/baz.py', b'a/b/d/qux.py', b'a/b/d/ten.txt', b'a/b/dog.py',
425 358 b'a/b/fish.py'], m2.keys())
426 359
427 360 def testMatchesExactPath(self):
428 361 '''Tests matches() on an exact match on a directory, which should
429 362 result in an empty manifest because you can't perform an exact match
430 363 against a directory.'''
431 364 m = self.parsemanifest(A_DEEPER_MANIFEST)
432 365
433 366 match = matchmod.match(b'/', b'', [b'a/b'], exact=True)
434 367 m2 = m.matches(match)
435 368
436 369 self.assertEqual([], m2.keys())
437 370
438 371 def testMatchesCwd(self):
439 372 '''Tests matches() on a relpath match with the current directory ('.')
440 373 when not in the root directory.'''
441 374 m = self.parsemanifest(A_DEEPER_MANIFEST)
442 375
443 376 match = matchmod.match(b'/', b'a/b', [b'.'], default=b'relpath')
444 377 m2 = m.matches(match)
445 378
446 379 self.assertEqual([
447 380 b'a/b/c/bar.py', b'a/b/c/bar.txt', b'a/b/c/foo.py',
448 381 b'a/b/c/foo.txt', b'a/b/d/baz.py', b'a/b/d/qux.py',
449 382 b'a/b/d/ten.txt', b'a/b/dog.py', b'a/b/fish.py'], m2.keys())
450 383
451 384 def testMatchesWithPattern(self):
452 385 '''Tests matches() for files matching a pattern that reside
453 386 deeper than the specified directory.'''
454 387 m = self.parsemanifest(A_DEEPER_MANIFEST)
455 388
456 389 match = matchmod.match(b'/', b'', [b'a/b/*/*.txt'])
457 390 m2 = m.matches(match)
458 391
459 392 self.assertEqual(
460 393 [b'a/b/c/bar.txt', b'a/b/c/foo.txt', b'a/b/d/ten.txt'],
461 394 m2.keys())
462 395
463 396 class testmanifestdict(unittest.TestCase, basemanifesttests):
464 397 def parsemanifest(self, text):
465 398 return manifestmod.manifestdict(text)
466 399
467 400 class testtreemanifest(unittest.TestCase, basemanifesttests):
468 401 def parsemanifest(self, text):
469 402 return manifestmod.treemanifest(b'', text)
470 403
471 404 def testWalkSubtrees(self):
472 405 m = self.parsemanifest(A_DEEPER_MANIFEST)
473 406
474 407 dirs = [s._dir for s in m.walksubtrees()]
475 408 self.assertEqual(
476 409 sorted([
477 410 b'', b'a/', b'a/c/', b'a/d/', b'a/b/', b'a/b/c/', b'a/b/d/']),
478 411 sorted(dirs)
479 412 )
480 413
481 414 match = matchmod.match(b'/', b'', [b'path:a/b/'])
482 415 dirs = [s._dir for s in m.walksubtrees(matcher=match)]
483 416 self.assertEqual(
484 417 sorted([b'a/b/', b'a/b/c/', b'a/b/d/']),
485 418 sorted(dirs)
486 419 )
487 420
488 421 if __name__ == '__main__':
489 422 silenttestrunner.main(__name__)
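
For readers skimming the tests above: a manifest is one sorted text blob, one line per tracked file: path, NUL, 40 hex digits of node, an optional flag byte (b'l' or b'x'), then a newline. The following stand-alone sketch mirrors the format rules the parser tests assert (sortedness, trailing newline); it is ours for illustration only and is not Mercurial's implementation:

import binascii

def parse_manifest_lines(text):
    # sketch only: real parsing lives in mercurial.manifest
    if text and not text.endswith(b'\n'):
        raise ValueError('Manifest did not end in a newline.')
    prev = None
    for line in text.split(b'\n')[:-1]:
        path, rest = line.split(b'\0', 1)
        if prev is not None and path <= prev:
            raise ValueError('Manifest lines not in sorted order.')
        prev = path
        # the first 40 bytes are the hex node; anything beyond is the flag
        yield path, binascii.unhexlify(rest[:40]), rest[40:]
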
@@ -1,702 +1,697 b''
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [extensions]
3 3 > share =
4 4 > EOF
5 5
6 6 store and revlogv1 are required in source
7 7
8 8 $ hg --config format.usestore=false init no-store
9 9 $ hg -R no-store debugupgraderepo
10 10 abort: cannot upgrade repository; requirement missing: store
11 11 [255]
12 12
13 13 $ hg init no-revlogv1
14 14 $ cat > no-revlogv1/.hg/requires << EOF
15 15 > dotencode
16 16 > fncache
17 17 > generaldelta
18 18 > store
19 19 > EOF
20 20
21 21 $ hg -R no-revlogv1 debugupgraderepo
22 22 abort: cannot upgrade repository; requirement missing: revlogv1
23 23 [255]
24 24
25 25 Cannot upgrade shared repositories
26 26
27 27 $ hg init share-parent
28 28 $ hg -q share share-parent share-child
29 29
30 30 $ hg -R share-child debugupgraderepo
31 31 abort: cannot upgrade repository; unsupported source requirement: shared
32 32 [255]
33 33
34 Do not yet support upgrading manifestv2 and treemanifest repos
35
36 $ hg --config experimental.manifestv2=true init manifestv2
37 $ hg -R manifestv2 debugupgraderepo
38 abort: cannot upgrade repository; unsupported source requirement: manifestv2
39 [255]
34 Do not yet support upgrading treemanifest repos
40 35
41 36 $ hg --config experimental.treemanifest=true init treemanifest
42 37 $ hg -R treemanifest debugupgraderepo
43 38 abort: cannot upgrade repository; unsupported source requirement: treemanifest
44 39 [255]
45 40
46 Cannot add manifestv2 or treemanifest requirement during upgrade
41 Cannot add treemanifest requirement during upgrade
47 42
48 43 $ hg init disallowaddedreq
49 $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
50 abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
44 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
45 abort: cannot upgrade repository; do not support adding requirement: treemanifest
51 46 [255]
52 47
53 48 An upgrade of a repository created with recommended settings only suggests optimizations
54 49
55 50 $ hg init empty
56 51 $ cd empty
57 52 $ hg debugformat
58 53 format-variant repo
59 54 fncache: yes
60 55 dotencode: yes
61 56 generaldelta: yes
62 57 plain-cl-delta: yes
63 58 compression: zlib
64 59 $ hg debugformat --verbose
65 60 format-variant repo config default
66 61 fncache: yes yes yes
67 62 dotencode: yes yes yes
68 63 generaldelta: yes yes yes
69 64 plain-cl-delta: yes yes yes
70 65 compression: zlib zlib zlib
71 66 $ hg debugformat --verbose --config format.usegfncache=no
72 67 format-variant repo config default
73 68 fncache: yes yes yes
74 69 dotencode: yes yes yes
75 70 generaldelta: yes yes yes
76 71 plain-cl-delta: yes yes yes
77 72 compression: zlib zlib zlib
78 73 $ hg debugformat --verbose --config format.usegfncache=no --color=debug
79 74 format-variant repo config default
80 75 [formatvariant.name.uptodate|fncache: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
81 76 [formatvariant.name.uptodate|dotencode: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
82 77 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
83 78 [formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
84 79 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
85 80 $ hg debugformat -Tjson
86 81 [
87 82 {
88 83 "config": true,
89 84 "default": true,
90 85 "name": "fncache",
91 86 "repo": true
92 87 },
93 88 {
94 89 "config": true,
95 90 "default": true,
96 91 "name": "dotencode",
97 92 "repo": true
98 93 },
99 94 {
100 95 "config": true,
101 96 "default": true,
102 97 "name": "generaldelta",
103 98 "repo": true
104 99 },
105 100 {
106 101 "config": true,
107 102 "default": true,
108 103 "name": "plain-cl-delta",
109 104 "repo": true
110 105 },
111 106 {
112 107 "config": "zlib",
113 108 "default": "zlib",
114 109 "name": "compression",
115 110 "repo": "zlib"
116 111 }
117 112 ]
118 113 $ hg debugupgraderepo
119 114 (no feature deficiencies found in existing repository)
120 115 performing an upgrade with "--run" will make the following changes:
121 116
122 117 requirements
123 118 preserved: dotencode, fncache, generaldelta, revlogv1, store
124 119
125 120 additional optimizations are available by specifying "--optimize <name>":
126 121
127 122 redeltaparent
128 123 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
129 124
130 125 redeltamultibase
131 126 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
132 127
133 128 redeltaall
134 129 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
135 130
136 131 redeltafulladd
137 132 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.
138 133
139 134
140 135 --optimize can be used to add optimizations
141 136
142 137 $ hg debugupgrade --optimize redeltaparent
143 138 (no feature deficiencies found in existing repository)
144 139 performing an upgrade with "--run" will make the following changes:
145 140
146 141 requirements
147 142 preserved: dotencode, fncache, generaldelta, revlogv1, store
148 143
149 144 redeltaparent
150 145 deltas within internal storage will choose a new base revision if needed
151 146
152 147 additional optimizations are available by specifying "--optimize <name>":
153 148
154 149 redeltamultibase
155 150 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
156 151
157 152 redeltaall
158 153 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
159 154
160 155 redeltafulladd
161 156 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.
162 157
163 158
164 159 Various sub-optimal detections work
165 160
166 161 $ cat > .hg/requires << EOF
167 162 > revlogv1
168 163 > store
169 164 > EOF
170 165
171 166 $ hg debugformat
172 167 format-variant repo
173 168 fncache: no
174 169 dotencode: no
175 170 generaldelta: no
176 171 plain-cl-delta: yes
177 172 compression: zlib
178 173 $ hg debugformat --verbose
179 174 format-variant repo config default
180 175 fncache: no yes yes
181 176 dotencode: no yes yes
182 177 generaldelta: no yes yes
183 178 plain-cl-delta: yes yes yes
184 179 compression: zlib zlib zlib
185 180 $ hg debugformat --verbose --config format.usegeneraldelta=no
186 181 format-variant repo config default
187 182 fncache: no yes yes
188 183 dotencode: no yes yes
189 184 generaldelta: no no yes
190 185 plain-cl-delta: yes yes yes
191 186 compression: zlib zlib zlib
192 187 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
193 188 format-variant repo config default
194 189 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
195 190 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
196 191 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
197 192 [formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
198 193 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
199 194 $ hg debugupgraderepo
200 195 repository lacks features recommended by current config options:
201 196
202 197 fncache
203 198 long and reserved filenames may not work correctly; repository performance is sub-optimal
204 199
205 200 dotencode
206 201 storage of filenames beginning with a period or space may not work correctly
207 202
208 203 generaldelta
209 204 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
210 205
211 206
212 207 performing an upgrade with "--run" will make the following changes:
213 208
214 209 requirements
215 210 preserved: revlogv1, store
216 211 added: dotencode, fncache, generaldelta
217 212
218 213 fncache
219 214 repository will be more resilient to storing certain paths and performance of certain operations should be improved
220 215
221 216 dotencode
222 217 repository will be better able to store files beginning with a space or period
223 218
224 219 generaldelta
225 220 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
226 221
227 222 additional optimizations are available by specifying "--optimize <name>":
228 223
229 224 redeltaparent
230 225 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
231 226
232 227 redeltamultibase
233 228 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
234 229
235 230 redeltaall
236 231 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
237 232
238 233 redeltafulladd
239 234 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.
240 235
241 236
242 237 $ hg --config format.dotencode=false debugupgraderepo
243 238 repository lacks features recommended by current config options:
244 239
245 240 fncache
246 241 long and reserved filenames may not work correctly; repository performance is sub-optimal
247 242
248 243 generaldelta
249 244 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
250 245
251 246 repository lacks features used by the default config options:
252 247
253 248 dotencode
254 249 storage of filenames beginning with a period or space may not work correctly
255 250
256 251
257 252 performing an upgrade with "--run" will make the following changes:
258 253
259 254 requirements
260 255 preserved: revlogv1, store
261 256 added: fncache, generaldelta
262 257
263 258 fncache
264 259 repository will be more resilient to storing certain paths and performance of certain operations should be improved
265 260
266 261 generaldelta
267 262 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
268 263
269 264 additional optimizations are available by specifying "--optimize <name>":
270 265
271 266 redeltaparent
272 267 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
273 268
274 269 redeltamultibase
275 270 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
276 271
277 272 redeltaall
278 273 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
279 274
280 275 redeltafulladd
281 276 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.
282 277
283 278
284 279 $ cd ..
285 280
286 281 Upgrading a repository that is already modern essentially no-ops
287 282
288 283 $ hg init modern
289 284 $ hg -R modern debugupgraderepo --run
290 285 upgrade will perform the following actions:
291 286
292 287 requirements
293 288 preserved: dotencode, fncache, generaldelta, revlogv1, store
294 289
295 290 beginning upgrade...
296 291 repository locked and read-only
297 292 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
298 293 (it is safe to interrupt this process any time before data migration completes)
299 294 data fully migrated to temporary repository
300 295 marking source repository as being upgraded; clients will be unable to read from repository
301 296 starting in-place swap of repository data
302 297 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
303 298 replacing store...
304 299 store replacement complete; repository was inconsistent for *s (glob)
305 300 finalizing requirements file and making repository readable again
306 301 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
307 302 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
308 303 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
309 304
310 305 Upgrading a repository to generaldelta works
311 306
312 307 $ hg --config format.usegeneraldelta=false init upgradegd
313 308 $ cd upgradegd
314 309 $ touch f0
315 310 $ hg -q commit -A -m initial
316 311 $ touch f1
317 312 $ hg -q commit -A -m 'add f1'
318 313 $ hg -q up -r 0
319 314 $ touch f2
320 315 $ hg -q commit -A -m 'add f2'
321 316
322 317 $ hg debugupgraderepo --run
323 318 upgrade will perform the following actions:
324 319
325 320 requirements
326 321 preserved: dotencode, fncache, revlogv1, store
327 322 added: generaldelta
328 323
329 324 generaldelta
330 325 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
331 326
332 327 beginning upgrade...
333 328 repository locked and read-only
334 329 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
335 330 (it is safe to interrupt this process any time before data migration completes)
336 331 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
337 332 migrating 341 bytes in store; 401 bytes tracked data
338 333 migrating 3 filelogs containing 3 revisions (0 bytes in store; 0 bytes tracked data)
339 334 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
340 335 migrating 1 manifests containing 3 revisions (157 bytes in store; 220 bytes tracked data)
341 336 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
342 337 migrating changelog containing 3 revisions (184 bytes in store; 181 bytes tracked data)
343 338 finished migrating 3 changelog revisions; change in size: 0 bytes
344 339 finished migrating 9 total revisions; total change in store size: 0 bytes
345 340 copying phaseroots
346 341 data fully migrated to temporary repository
347 342 marking source repository as being upgraded; clients will be unable to read from repository
348 343 starting in-place swap of repository data
349 344 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
350 345 replacing store...
351 346 store replacement complete; repository was inconsistent for *s (glob)
352 347 finalizing requirements file and making repository readable again
353 348 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
354 349 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
355 350 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
356 351
357 352 Original requirements backed up
358 353
359 354 $ cat .hg/upgradebackup.*/requires
360 355 dotencode
361 356 fncache
362 357 revlogv1
363 358 store
364 359
366 361 generaldelta added to original requirements file
366 361
367 362 $ cat .hg/requires
368 363 dotencode
369 364 fncache
370 365 generaldelta
371 366 revlogv1
372 367 store
373 368
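
The requirements bookkeeping above is effectively a set union: everything preserved plus everything added, written back one entry per line in sorted order. A rough sketch under that assumption (the helper name is ours, not Mercurial's internals):

def upgraded_requirements(preserved, added):
    # mirrors the "preserved:" and "added:" lines of the upgrade plan
    return sorted(set(preserved) | set(added))

assert upgraded_requirements(
    ['dotencode', 'fncache', 'revlogv1', 'store'],
    ['generaldelta'],
) == ['dotencode', 'fncache', 'generaldelta', 'revlogv1', 'store']
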
374 369 store directory has files we expect
375 370
376 371 $ ls .hg/store
377 372 00changelog.i
378 373 00manifest.i
379 374 data
380 375 fncache
381 376 phaseroots
382 377 undo
383 378 undo.backupfiles
384 379 undo.phaseroots
385 380
386 381 manifest should be generaldelta
387 382
388 383 $ hg debugrevlog -m | grep flags
389 384 flags : inline, generaldelta
390 385
391 386 verify should be happy
392 387
393 388 $ hg verify
394 389 checking changesets
395 390 checking manifests
396 391 crosschecking files in changesets and manifests
397 392 checking files
398 393 3 files, 3 changesets, 3 total revisions
399 394
400 395 old store should be backed up
401 396
402 397 $ ls .hg/upgradebackup.*/store
403 398 00changelog.i
404 399 00manifest.i
405 400 data
406 401 fncache
407 402 phaseroots
408 403 undo
409 404 undo.backup.fncache
410 405 undo.backupfiles
411 406 undo.phaseroots
412 407
413 408 $ cd ..
414 409
415 410 store files with special filenames aren't encoded during copy
416 411
417 412 $ hg init store-filenames
418 413 $ cd store-filenames
419 414 $ touch foo
420 415 $ hg -q commit -A -m initial
421 416 $ touch .hg/store/.XX_special_filename
422 417
423 418 $ hg debugupgraderepo --run
424 419 upgrade will perform the following actions:
425 420
426 421 requirements
427 422 preserved: dotencode, fncache, generaldelta, revlogv1, store
428 423
429 424 beginning upgrade...
430 425 repository locked and read-only
431 426 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
432 427 (it is safe to interrupt this process any time before data migration completes)
433 428 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
434 429 migrating 109 bytes in store; 107 bytes tracked data
435 430 migrating 1 filelogs containing 1 revisions (0 bytes in store; 0 bytes tracked data)
436 431 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
437 432 migrating 1 manifests containing 1 revisions (46 bytes in store; 45 bytes tracked data)
438 433 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
439 434 migrating changelog containing 1 revisions (63 bytes in store; 62 bytes tracked data)
440 435 finished migrating 1 changelog revisions; change in size: 0 bytes
441 436 finished migrating 3 total revisions; total change in store size: 0 bytes
442 437 copying .XX_special_filename
443 438 copying phaseroots
444 439 data fully migrated to temporary repository
445 440 marking source repository as being upgraded; clients will be unable to read from repository
446 441 starting in-place swap of repository data
447 442 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
448 443 replacing store...
449 444 store replacement complete; repository was inconsistent for *s (glob)
450 445 finalizing requirements file and making repository readable again
451 446 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
452 447 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
453 448 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
454 449 $ hg debugupgraderepo --run --optimize redeltafulladd
455 450 upgrade will perform the following actions:
456 451
457 452 requirements
458 453 preserved: dotencode, fncache, generaldelta, revlogv1, store
459 454
460 455 redeltafulladd
461 456 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
462 457
463 458 beginning upgrade...
464 459 repository locked and read-only
465 460 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
466 461 (it is safe to interrupt this process any time before data migration completes)
467 462 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
468 463 migrating 109 bytes in store; 107 bytes tracked data
469 464 migrating 1 filelogs containing 1 revisions (0 bytes in store; 0 bytes tracked data)
470 465 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
471 466 migrating 1 manifests containing 1 revisions (46 bytes in store; 45 bytes tracked data)
472 467 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
473 468 migrating changelog containing 1 revisions (63 bytes in store; 62 bytes tracked data)
474 469 finished migrating 1 changelog revisions; change in size: 0 bytes
475 470 finished migrating 3 total revisions; total change in store size: 0 bytes
476 471 copying .XX_special_filename
477 472 copying phaseroots
478 473 data fully migrated to temporary repository
479 474 marking source repository as being upgraded; clients will be unable to read from repository
480 475 starting in-place swap of repository data
481 476 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
482 477 replacing store...
483 478 store replacement complete; repository was inconsistent for *s (glob)
484 479 finalizing requirements file and making repository readable again
485 480 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
486 481 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
487 482 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
488 483
489 484 $ cd ..
490 485
491 486 Check upgrading a large file repository
492 487 ---------------------------------------
493 488
494 489 $ hg init largefilesrepo
495 490 $ cat << EOF >> largefilesrepo/.hg/hgrc
496 491 > [extensions]
497 492 > largefiles =
498 493 > EOF
499 494
500 495 $ cd largefilesrepo
501 496 $ touch foo
502 497 $ hg add --large foo
503 498 $ hg -q commit -m initial
504 499 $ cat .hg/requires
505 500 dotencode
506 501 fncache
507 502 generaldelta
508 503 largefiles
509 504 revlogv1
510 505 store
511 506
512 507 $ hg debugupgraderepo --run
513 508 upgrade will perform the following actions:
514 509
515 510 requirements
516 511 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, store
517 512
518 513 beginning upgrade...
519 514 repository locked and read-only
520 515 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
521 516 (it is safe to interrupt this process any time before data migration completes)
522 517 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
523 518 migrating 163 bytes in store; 160 bytes tracked data
524 519 migrating 1 filelogs containing 1 revisions (42 bytes in store; 41 bytes tracked data)
525 520 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
526 521 migrating 1 manifests containing 1 revisions (52 bytes in store; 51 bytes tracked data)
527 522 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
528 523 migrating changelog containing 1 revisions (69 bytes in store; 68 bytes tracked data)
529 524 finished migrating 1 changelog revisions; change in size: 0 bytes
530 525 finished migrating 3 total revisions; total change in store size: 0 bytes
531 526 copying phaseroots
532 527 data fully migrated to temporary repository
533 528 marking source repository as being upgraded; clients will be unable to read from repository
534 529 starting in-place swap of repository data
535 530 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
536 531 replacing store...
537 532 store replacement complete; repository was inconsistent for *s (glob)
538 533 finalizing requirements file and making repository readable again
539 534 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
540 535 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
541 536 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
542 537 $ cat .hg/requires
543 538 dotencode
544 539 fncache
545 540 generaldelta
546 541 largefiles
547 542 revlogv1
548 543 store
549 544
550 545 $ cat << EOF >> .hg/hgrc
551 546 > [extensions]
552 547 > lfs =
553 548 > [lfs]
554 549 > threshold = 10
555 550 > EOF
556 551 $ echo '123456789012345' > lfs.bin
557 552 $ hg ci -Am 'lfs.bin'
558 553 adding lfs.bin
559 554 $ grep lfs .hg/requires
560 555 lfs
561 556 $ find .hg/store/lfs -type f
562 557 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
563 558
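
That blob path is content-addressed: the sha256 oid of the raw file bytes, fanned out on its first two hex digits so no single directory grows too large. A minimal sketch of the layout; lfs_blob_path is our illustrative helper, not an lfs API:

import hashlib

def lfs_blob_path(data):
    # blobs are stored under the sha256 of their raw content
    oid = hashlib.sha256(data).hexdigest()
    # the first two hex digits become a fan-out directory
    return '.hg/store/lfs/objects/%s/%s' % (oid[:2], oid[2:])

# the 16 bytes committed above: '123456789012345' plus echo's newline
assert lfs_blob_path(b'123456789012345\n') == (
    '.hg/store/lfs/objects/d0/'
    'beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f')
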
564 559 $ hg debugupgraderepo --run
565 560 upgrade will perform the following actions:
566 561
567 562 requirements
568 563 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, store
569 564
570 565 beginning upgrade...
571 566 repository locked and read-only
572 567 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
573 568 (it is safe to interrupt this process any time before data migration completes)
574 569 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
575 570 migrating 417 bytes in store; 467 bytes tracked data
576 571 migrating 2 filelogs containing 2 revisions (168 bytes in store; 182 bytes tracked data)
577 572 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
578 573 migrating 1 manifests containing 2 revisions (113 bytes in store; 151 bytes tracked data)
579 574 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
580 575 migrating changelog containing 2 revisions (136 bytes in store; 134 bytes tracked data)
581 576 finished migrating 2 changelog revisions; change in size: 0 bytes
582 577 finished migrating 6 total revisions; total change in store size: 0 bytes
583 578 copying phaseroots
584 579 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
585 580 data fully migrated to temporary repository
586 581 marking source repository as being upgraded; clients will be unable to read from repository
587 582 starting in-place swap of repository data
588 583 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
589 584 replacing store...
590 585 store replacement complete; repository was inconsistent for *s (glob)
591 586 finalizing requirements file and making repository readable again
592 587 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
593 588 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
594 589 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
595 590
596 591 $ grep lfs .hg/requires
597 592 lfs
598 593 $ find .hg/store/lfs -type f
599 594 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
600 595 $ hg verify
601 596 checking changesets
602 597 checking manifests
603 598 crosschecking files in changesets and manifests
604 599 checking files
605 600 2 files, 2 changesets, 2 total revisions
606 601 $ hg debugdata lfs.bin 0
607 602 version https://git-lfs.github.com/spec/v1
608 603 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
609 604 size 16
610 605 x-is-binary 0
611 606
612 607 $ cd ..
613 608
614 609 repository config is taken into account
615 610 ---------------------------------------
616 611
617 612 $ cat << EOF >> $HGRCPATH
618 613 > [format]
619 614 > maxchainlen = 1
620 615 > EOF
621 616
622 617 $ hg init localconfig
623 618 $ cd localconfig
624 619 $ cat << EOF > file
625 620 > some content
626 621 > with some length
627 622 > to make sure we get a delta
628 623 > after changes
629 624 > very long
630 625 > very long
631 626 > very long
632 627 > very long
633 628 > very long
634 629 > very long
635 630 > very long
636 631 > very long
637 632 > very long
638 633 > very long
639 634 > very long
640 635 > EOF
641 636 $ hg -q commit -A -m A
642 637 $ echo "new line" >> file
643 638 $ hg -q commit -m B
644 639 $ echo "new line" >> file
645 640 $ hg -q commit -m C
646 641
647 642 $ cat << EOF >> .hg/hgrc
648 643 > [format]
649 644 > maxchainlen = 9001
650 645 > EOF
651 646 $ hg config format
652 647 format.maxchainlen=9001
653 648 $ hg debugindex file
654 649 rev offset length delta linkrev nodeid p1 p2
655 650 0 0 77 -1 0 bcc1d3df78b2 000000000000 000000000000
656 651 1 77 21 0 1 af3e29f7a72e bcc1d3df78b2 000000000000
657 652 2 98 84 -1 2 8daf79c5522b af3e29f7a72e 000000000000
658 653
659 654 $ hg debugupgraderepo --run --optimize redeltaall
660 655 upgrade will perform the following actions:
661 656
662 657 requirements
663 658 preserved: dotencode, fncache, generaldelta, revlogv1, store
664 659
665 660 redeltaall
666 661 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
667 662
668 663 beginning upgrade...
669 664 repository locked and read-only
670 665 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
671 666 (it is safe to interrupt this process any time before data migration completes)
672 667 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
673 668 migrating 497 bytes in store; 882 bytes tracked data
674 669 migrating 1 filelogs containing 3 revisions (182 bytes in store; 573 bytes tracked data)
675 670 finished migrating 3 filelog revisions across 1 filelogs; change in size: -63 bytes
676 671 migrating 1 manifests containing 3 revisions (141 bytes in store; 138 bytes tracked data)
677 672 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
678 673 migrating changelog containing 3 revisions (174 bytes in store; 171 bytes tracked data)
679 674 finished migrating 3 changelog revisions; change in size: 0 bytes
680 675 finished migrating 9 total revisions; total change in store size: -63 bytes
681 676 copying phaseroots
682 677 data fully migrated to temporary repository
683 678 marking source repository as being upgraded; clients will be unable to read from repository
684 679 starting in-place swap of repository data
685 680 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
686 681 replacing store...
687 682 store replacement complete; repository was inconsistent for *s (glob)
688 683 finalizing requirements file and making repository readable again
689 684 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
690 685 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
691 686 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
692 687 $ hg debugindex file
693 688 rev offset length delta linkrev nodeid p1 p2
694 689 0 0 77 -1 0 bcc1d3df78b2 000000000000 000000000000
695 690 1 77 21 0 1 af3e29f7a72e bcc1d3df78b2 000000000000
696 691 2 98 21 1 2 8daf79c5522b af3e29f7a72e 000000000000
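
Note the delta column: under maxchainlen=1, rev 2 could not extend the chain past rev 1 and had to be stored as a full snapshot (delta -1, length 84). After redeltaall with the larger maxchainlen it becomes a delta against rev 1 (delta 1, length 21), which accounts for the -63 byte change the migration reported.
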
697 692 $ cd ..
698 693
699 694 $ cat << EOF >> $HGRCPATH
700 695 > [format]
701 696 > maxchainlen = 9001
702 697 > EOF
1 NO CONTENT: file was removed