nodemap: add a (python) index class for persistent nodemap testing...
marmoute
r44794:6f9e8e14 default
@@ -1,1558 +1,1561 @@ mercurial/configitems.py
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
19 19 def loadconfigtable(ui, extname, configtable):
20 20 """update config item known to the ui with the extension ones"""
21 21 for section, items in sorted(configtable.items()):
22 22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 23 knownkeys = set(knownitems)
24 24 newkeys = set(items)
25 25 for key in sorted(knownkeys & newkeys):
26 26 msg = b"extension '%s' overwrite config item '%s.%s'"
27 27 msg %= (extname, section, key)
28 28 ui.develwarn(msg, config=b'warn-config')
29 29
30 30 knownitems.update(items)
31 31
32 32
33 33 class configitem(object):
34 34 """represent a known config item
35 35
36 36 :section: the official config section where to find this item,
37 37 :name: the official name within the section,
38 38 :default: default value for this item,
39 39 :alias: optional list of tuples as alternatives,
40 40 :generic: this is a generic definition, match name using regular expression.
41 41 """
42 42
43 43 def __init__(
44 44 self,
45 45 section,
46 46 name,
47 47 default=None,
48 48 alias=(),
49 49 generic=False,
50 50 priority=0,
51 51 experimental=False,
52 52 ):
53 53 self.section = section
54 54 self.name = name
55 55 self.default = default
56 56 self.alias = list(alias)
57 57 self.generic = generic
58 58 self.priority = priority
59 59 self.experimental = experimental
60 60 self._re = None
61 61 if generic:
62 62 self._re = re.compile(self.name)
63 63
64 64
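configitem above is a plain record of a declared option; the only behaviour it carries is that a generic definition compiles its name as a regular expression at construction time. A minimal illustrative sketch (the two option names are taken from registrations later in this file):

    plain = configitem(b'ui', b'verbose', default=False)
    wild = configitem(b'merge-tools', br'.*\.args$', default=None, generic=True)
    assert plain._re is None                            # ordinary items keep no regex
    assert wild._re.match(b'vimdiff.args') is not None  # generic names are patterns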
65 65 class itemregister(dict):
66 66 """A specialized dictionary that can handle wild-card selection"""
67 67
68 68 def __init__(self):
69 69 super(itemregister, self).__init__()
70 70 self._generics = set()
71 71
72 72 def update(self, other):
73 73 super(itemregister, self).update(other)
74 74 self._generics.update(other._generics)
75 75
76 76 def __setitem__(self, key, item):
77 77 super(itemregister, self).__setitem__(key, item)
78 78 if item.generic:
79 79 self._generics.add(item)
80 80
81 81 def get(self, key):
82 82 baseitem = super(itemregister, self).get(key)
83 83 if baseitem is not None and not baseitem.generic:
84 84 return baseitem
85 85
86 86 # search for a matching generic item
87 87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 88 for item in generics:
89 89 # we use 'match' instead of 'search' to make the matching simpler
90 90 # for people unfamiliar with regular expression. Having the match
91 91 # rooted to the start of the string will produce less surprising
92 92 # result for user writing simple regex for sub-attribute.
93 93 #
94 94 # For example using "color\..*" match produces an unsurprising
95 95 # result, while using search could suddenly match apparently
96 96 # unrelated configuration that happens to contains "color."
97 97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 98 # some match to avoid the need to prefix most pattern with "^".
99 99 # The "^" seems more error prone.
100 100 if item._re.match(key):
101 101 return item
102 102
103 103 return None
104 104
105 105
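itemregister.get() tries an exact key first and only then scans the generic (wild-card) items, sorted by priority and name, using an anchored match() as the comment above explains. A small sketch of that fallback, reusing the color section that is registered further down:

    reg = itemregister()
    reg[b'mode'] = configitem(b'color', b'mode', default=b'auto')
    reg[b'.*'] = configitem(b'color', b'.*', default=None, generic=True)
    assert reg.get(b'mode').default == b'auto'  # exact entry wins
    assert reg.get(b'pagermode').generic        # unknown key falls back to the b'.*' item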
106 106 coreitems = {}
107 107
108 108
109 109 def _register(configtable, *args, **kwargs):
110 110 item = configitem(*args, **kwargs)
111 111 section = configtable.setdefault(item.section, itemregister())
112 112 if item.name in section:
113 113 msg = b"duplicated config item registration for '%s.%s'"
114 114 raise error.ProgrammingError(msg % (item.section, item.name))
115 115 section[item.name] = item
116 116
117 117
118 118 # special value for case where the default is derived from other values
119 119 dynamicdefault = object()
120 120
121 121 # Registering actual config items
122 122
123 123
124 124 def getitemregister(configtable):
125 125 f = functools.partial(_register, configtable)
126 126 # export pseudo enum as configitem.*
127 127 f.dynamicdefault = dynamicdefault
128 128 return f
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
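coreconfigitem() is just getitemregister() bound to the module-level coreitems table; _register() creates the configitem, drops it into the per-section itemregister and refuses duplicate (section, name) pairs with a ProgrammingError. A hedged sketch against a throwaway table (the section and item names are invented for illustration):

    exampleitems = {}
    exampleconfigitem = getitemregister(exampleitems)
    exampleconfigitem(b'example-section', b'example-name', default=42)
    assert exampleitems[b'example-section'][b'example-name'].default == 42
    # calling exampleconfigitem(b'example-section', b'example-name', ...) again
    # would raise error.ProgrammingError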
134 134 def _registerdiffopts(section, configprefix=b''):
135 135 coreconfigitem(
136 136 section, configprefix + b'nodates', default=False,
137 137 )
138 138 coreconfigitem(
139 139 section, configprefix + b'showfunc', default=False,
140 140 )
141 141 coreconfigitem(
142 142 section, configprefix + b'unified', default=None,
143 143 )
144 144 coreconfigitem(
145 145 section, configprefix + b'git', default=False,
146 146 )
147 147 coreconfigitem(
148 148 section, configprefix + b'ignorews', default=False,
149 149 )
150 150 coreconfigitem(
151 151 section, configprefix + b'ignorewsamount', default=False,
152 152 )
153 153 coreconfigitem(
154 154 section, configprefix + b'ignoreblanklines', default=False,
155 155 )
156 156 coreconfigitem(
157 157 section, configprefix + b'ignorewseol', default=False,
158 158 )
159 159 coreconfigitem(
160 160 section, configprefix + b'nobinary', default=False,
161 161 )
162 162 coreconfigitem(
163 163 section, configprefix + b'noprefix', default=False,
164 164 )
165 165 coreconfigitem(
166 166 section, configprefix + b'word-diff', default=False,
167 167 )
168 168
169 169
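_registerdiffopts() registers the standard diff-related knobs (nodates, showfunc, unified, git, the ignore* family, nobinary, noprefix, word-diff) under the given section, optionally behind a prefix; the annotate, commands and diff calls below all reuse it. A hedged sketch using a hypothetical section name so as not to collide with the real registrations:

    _registerdiffopts(section=b'example-diff-section', configprefix=b'interactive.')
    assert b'interactive.showfunc' in coreitems[b'example-diff-section']
    assert coreitems[b'example-diff-section'][b'interactive.unified'].default is None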
170 170 coreconfigitem(
171 171 b'alias', b'.*', default=dynamicdefault, generic=True,
172 172 )
173 173 coreconfigitem(
174 174 b'auth', b'cookiefile', default=None,
175 175 )
176 176 _registerdiffopts(section=b'annotate')
177 177 # bookmarks.pushing: internal hack for discovery
178 178 coreconfigitem(
179 179 b'bookmarks', b'pushing', default=list,
180 180 )
181 181 # bundle.mainreporoot: internal hack for bundlerepo
182 182 coreconfigitem(
183 183 b'bundle', b'mainreporoot', default=b'',
184 184 )
185 185 coreconfigitem(
186 186 b'censor', b'policy', default=b'abort', experimental=True,
187 187 )
188 188 coreconfigitem(
189 189 b'chgserver', b'idletimeout', default=3600,
190 190 )
191 191 coreconfigitem(
192 192 b'chgserver', b'skiphash', default=False,
193 193 )
194 194 coreconfigitem(
195 195 b'cmdserver', b'log', default=None,
196 196 )
197 197 coreconfigitem(
198 198 b'cmdserver', b'max-log-files', default=7,
199 199 )
200 200 coreconfigitem(
201 201 b'cmdserver', b'max-log-size', default=b'1 MB',
202 202 )
203 203 coreconfigitem(
204 204 b'cmdserver', b'max-repo-cache', default=0, experimental=True,
205 205 )
206 206 coreconfigitem(
207 207 b'cmdserver', b'message-encodings', default=list, experimental=True,
208 208 )
209 209 coreconfigitem(
210 210 b'cmdserver',
211 211 b'track-log',
212 212 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
213 213 )
214 214 coreconfigitem(
215 215 b'color', b'.*', default=None, generic=True,
216 216 )
217 217 coreconfigitem(
218 218 b'color', b'mode', default=b'auto',
219 219 )
220 220 coreconfigitem(
221 221 b'color', b'pagermode', default=dynamicdefault,
222 222 )
223 223 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
224 224 coreconfigitem(
225 225 b'commands', b'commit.post-status', default=False,
226 226 )
227 227 coreconfigitem(
228 228 b'commands', b'grep.all-files', default=False, experimental=True,
229 229 )
230 230 coreconfigitem(
231 231 b'commands', b'merge.require-rev', default=False,
232 232 )
233 233 coreconfigitem(
234 234 b'commands', b'push.require-revs', default=False,
235 235 )
236 236 coreconfigitem(
237 237 b'commands', b'resolve.confirm', default=False,
238 238 )
239 239 coreconfigitem(
240 240 b'commands', b'resolve.explicit-re-merge', default=False,
241 241 )
242 242 coreconfigitem(
243 243 b'commands', b'resolve.mark-check', default=b'none',
244 244 )
245 245 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
246 246 coreconfigitem(
247 247 b'commands', b'show.aliasprefix', default=list,
248 248 )
249 249 coreconfigitem(
250 250 b'commands', b'status.relative', default=False,
251 251 )
252 252 coreconfigitem(
253 253 b'commands', b'status.skipstates', default=[], experimental=True,
254 254 )
255 255 coreconfigitem(
256 256 b'commands', b'status.terse', default=b'',
257 257 )
258 258 coreconfigitem(
259 259 b'commands', b'status.verbose', default=False,
260 260 )
261 261 coreconfigitem(
262 262 b'commands', b'update.check', default=None,
263 263 )
264 264 coreconfigitem(
265 265 b'commands', b'update.requiredest', default=False,
266 266 )
267 267 coreconfigitem(
268 268 b'committemplate', b'.*', default=None, generic=True,
269 269 )
270 270 coreconfigitem(
271 271 b'convert', b'bzr.saverev', default=True,
272 272 )
273 273 coreconfigitem(
274 274 b'convert', b'cvsps.cache', default=True,
275 275 )
276 276 coreconfigitem(
277 277 b'convert', b'cvsps.fuzz', default=60,
278 278 )
279 279 coreconfigitem(
280 280 b'convert', b'cvsps.logencoding', default=None,
281 281 )
282 282 coreconfigitem(
283 283 b'convert', b'cvsps.mergefrom', default=None,
284 284 )
285 285 coreconfigitem(
286 286 b'convert', b'cvsps.mergeto', default=None,
287 287 )
288 288 coreconfigitem(
289 289 b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
290 290 )
291 291 coreconfigitem(
292 292 b'convert', b'git.extrakeys', default=list,
293 293 )
294 294 coreconfigitem(
295 295 b'convert', b'git.findcopiesharder', default=False,
296 296 )
297 297 coreconfigitem(
298 298 b'convert', b'git.remoteprefix', default=b'remote',
299 299 )
300 300 coreconfigitem(
301 301 b'convert', b'git.renamelimit', default=400,
302 302 )
303 303 coreconfigitem(
304 304 b'convert', b'git.saverev', default=True,
305 305 )
306 306 coreconfigitem(
307 307 b'convert', b'git.similarity', default=50,
308 308 )
309 309 coreconfigitem(
310 310 b'convert', b'git.skipsubmodules', default=False,
311 311 )
312 312 coreconfigitem(
313 313 b'convert', b'hg.clonebranches', default=False,
314 314 )
315 315 coreconfigitem(
316 316 b'convert', b'hg.ignoreerrors', default=False,
317 317 )
318 318 coreconfigitem(
319 319 b'convert', b'hg.preserve-hash', default=False,
320 320 )
321 321 coreconfigitem(
322 322 b'convert', b'hg.revs', default=None,
323 323 )
324 324 coreconfigitem(
325 325 b'convert', b'hg.saverev', default=False,
326 326 )
327 327 coreconfigitem(
328 328 b'convert', b'hg.sourcename', default=None,
329 329 )
330 330 coreconfigitem(
331 331 b'convert', b'hg.startrev', default=None,
332 332 )
333 333 coreconfigitem(
334 334 b'convert', b'hg.tagsbranch', default=b'default',
335 335 )
336 336 coreconfigitem(
337 337 b'convert', b'hg.usebranchnames', default=True,
338 338 )
339 339 coreconfigitem(
340 340 b'convert', b'ignoreancestorcheck', default=False, experimental=True,
341 341 )
342 342 coreconfigitem(
343 343 b'convert', b'localtimezone', default=False,
344 344 )
345 345 coreconfigitem(
346 346 b'convert', b'p4.encoding', default=dynamicdefault,
347 347 )
348 348 coreconfigitem(
349 349 b'convert', b'p4.startrev', default=0,
350 350 )
351 351 coreconfigitem(
352 352 b'convert', b'skiptags', default=False,
353 353 )
354 354 coreconfigitem(
355 355 b'convert', b'svn.debugsvnlog', default=True,
356 356 )
357 357 coreconfigitem(
358 358 b'convert', b'svn.trunk', default=None,
359 359 )
360 360 coreconfigitem(
361 361 b'convert', b'svn.tags', default=None,
362 362 )
363 363 coreconfigitem(
364 364 b'convert', b'svn.branches', default=None,
365 365 )
366 366 coreconfigitem(
367 367 b'convert', b'svn.startrev', default=0,
368 368 )
369 369 coreconfigitem(
370 370 b'debug', b'dirstate.delaywrite', default=0,
371 371 )
372 372 coreconfigitem(
373 373 b'defaults', b'.*', default=None, generic=True,
374 374 )
375 375 coreconfigitem(
376 376 b'devel', b'all-warnings', default=False,
377 377 )
378 378 coreconfigitem(
379 379 b'devel', b'bundle2.debug', default=False,
380 380 )
381 381 coreconfigitem(
382 382 b'devel', b'bundle.delta', default=b'',
383 383 )
384 384 coreconfigitem(
385 385 b'devel', b'cache-vfs', default=None,
386 386 )
387 387 coreconfigitem(
388 388 b'devel', b'check-locks', default=False,
389 389 )
390 390 coreconfigitem(
391 391 b'devel', b'check-relroot', default=False,
392 392 )
393 393 coreconfigitem(
394 394 b'devel', b'default-date', default=None,
395 395 )
396 396 coreconfigitem(
397 397 b'devel', b'deprec-warn', default=False,
398 398 )
399 399 coreconfigitem(
400 400 b'devel', b'disableloaddefaultcerts', default=False,
401 401 )
402 402 coreconfigitem(
403 403 b'devel', b'warn-empty-changegroup', default=False,
404 404 )
405 405 coreconfigitem(
406 406 b'devel', b'legacy.exchange', default=list,
407 407 )
408 408 coreconfigitem(
409 b'devel', b'persistent-nodemap', default=False,
410 )
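The two lines added above are the configitems.py part of this changeset: a new devel.persistent-nodemap boolean, off by default, which matches the "persistent nodemap testing" purpose stated in the commit message. A hedged sketch of reading it back (repo stands for any repository object; ui.configbool() is the usual boolean accessor):

    def _persistent_nodemap_enabled(repo):
        # illustrative helper, not part of the diff
        return repo.ui.configbool(b'devel', b'persistent-nodemap')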
411 coreconfigitem(
409 412 b'devel', b'servercafile', default=b'',
410 413 )
411 414 coreconfigitem(
412 415 b'devel', b'serverexactprotocol', default=b'',
413 416 )
414 417 coreconfigitem(
415 418 b'devel', b'serverrequirecert', default=False,
416 419 )
417 420 coreconfigitem(
418 421 b'devel', b'strip-obsmarkers', default=True,
419 422 )
420 423 coreconfigitem(
421 424 b'devel', b'warn-config', default=None,
422 425 )
423 426 coreconfigitem(
424 427 b'devel', b'warn-config-default', default=None,
425 428 )
426 429 coreconfigitem(
427 430 b'devel', b'user.obsmarker', default=None,
428 431 )
429 432 coreconfigitem(
430 433 b'devel', b'warn-config-unknown', default=None,
431 434 )
432 435 coreconfigitem(
433 436 b'devel', b'debug.copies', default=False,
434 437 )
435 438 coreconfigitem(
436 439 b'devel', b'debug.extensions', default=False,
437 440 )
438 441 coreconfigitem(
439 442 b'devel', b'debug.repo-filters', default=False,
440 443 )
441 444 coreconfigitem(
442 445 b'devel', b'debug.peer-request', default=False,
443 446 )
444 447 coreconfigitem(
445 448 b'devel', b'discovery.randomize', default=True,
446 449 )
447 450 _registerdiffopts(section=b'diff')
448 451 coreconfigitem(
449 452 b'email', b'bcc', default=None,
450 453 )
451 454 coreconfigitem(
452 455 b'email', b'cc', default=None,
453 456 )
454 457 coreconfigitem(
455 458 b'email', b'charsets', default=list,
456 459 )
457 460 coreconfigitem(
458 461 b'email', b'from', default=None,
459 462 )
460 463 coreconfigitem(
461 464 b'email', b'method', default=b'smtp',
462 465 )
463 466 coreconfigitem(
464 467 b'email', b'reply-to', default=None,
465 468 )
466 469 coreconfigitem(
467 470 b'email', b'to', default=None,
468 471 )
469 472 coreconfigitem(
470 473 b'experimental', b'archivemetatemplate', default=dynamicdefault,
471 474 )
472 475 coreconfigitem(
473 476 b'experimental', b'auto-publish', default=b'publish',
474 477 )
475 478 coreconfigitem(
476 479 b'experimental', b'bundle-phases', default=False,
477 480 )
478 481 coreconfigitem(
479 482 b'experimental', b'bundle2-advertise', default=True,
480 483 )
481 484 coreconfigitem(
482 485 b'experimental', b'bundle2-output-capture', default=False,
483 486 )
484 487 coreconfigitem(
485 488 b'experimental', b'bundle2.pushback', default=False,
486 489 )
487 490 coreconfigitem(
488 491 b'experimental', b'bundle2lazylocking', default=False,
489 492 )
490 493 coreconfigitem(
491 494 b'experimental', b'bundlecomplevel', default=None,
492 495 )
493 496 coreconfigitem(
494 497 b'experimental', b'bundlecomplevel.bzip2', default=None,
495 498 )
496 499 coreconfigitem(
497 500 b'experimental', b'bundlecomplevel.gzip', default=None,
498 501 )
499 502 coreconfigitem(
500 503 b'experimental', b'bundlecomplevel.none', default=None,
501 504 )
502 505 coreconfigitem(
503 506 b'experimental', b'bundlecomplevel.zstd', default=None,
504 507 )
505 508 coreconfigitem(
506 509 b'experimental', b'changegroup3', default=False,
507 510 )
508 511 coreconfigitem(
509 512 b'experimental', b'cleanup-as-archived', default=False,
510 513 )
511 514 coreconfigitem(
512 515 b'experimental', b'clientcompressionengines', default=list,
513 516 )
514 517 coreconfigitem(
515 518 b'experimental', b'copytrace', default=b'on',
516 519 )
517 520 coreconfigitem(
518 521 b'experimental', b'copytrace.movecandidateslimit', default=100,
519 522 )
520 523 coreconfigitem(
521 524 b'experimental', b'copytrace.sourcecommitlimit', default=100,
522 525 )
523 526 coreconfigitem(
524 527 b'experimental', b'copies.read-from', default=b"filelog-only",
525 528 )
526 529 coreconfigitem(
527 530 b'experimental', b'copies.write-to', default=b'filelog-only',
528 531 )
529 532 coreconfigitem(
530 533 b'experimental', b'crecordtest', default=None,
531 534 )
532 535 coreconfigitem(
533 536 b'experimental', b'directaccess', default=False,
534 537 )
535 538 coreconfigitem(
536 539 b'experimental', b'directaccess.revnums', default=False,
537 540 )
538 541 coreconfigitem(
539 542 b'experimental', b'editortmpinhg', default=False,
540 543 )
541 544 coreconfigitem(
542 545 b'experimental', b'evolution', default=list,
543 546 )
544 547 coreconfigitem(
545 548 b'experimental',
546 549 b'evolution.allowdivergence',
547 550 default=False,
548 551 alias=[(b'experimental', b'allowdivergence')],
549 552 )
550 553 coreconfigitem(
551 554 b'experimental', b'evolution.allowunstable', default=None,
552 555 )
553 556 coreconfigitem(
554 557 b'experimental', b'evolution.createmarkers', default=None,
555 558 )
556 559 coreconfigitem(
557 560 b'experimental',
558 561 b'evolution.effect-flags',
559 562 default=True,
560 563 alias=[(b'experimental', b'effect-flags')],
561 564 )
562 565 coreconfigitem(
563 566 b'experimental', b'evolution.exchange', default=None,
564 567 )
565 568 coreconfigitem(
566 569 b'experimental', b'evolution.bundle-obsmarker', default=False,
567 570 )
568 571 coreconfigitem(
569 572 b'experimental', b'log.topo', default=False,
570 573 )
571 574 coreconfigitem(
572 575 b'experimental', b'evolution.report-instabilities', default=True,
573 576 )
574 577 coreconfigitem(
575 578 b'experimental', b'evolution.track-operation', default=True,
576 579 )
577 580 # repo-level config to exclude a revset visibility
578 581 #
579 582 # The target use case is to use `share` to expose different subset of the same
580 583 # repository, especially server side. See also `server.view`.
581 584 coreconfigitem(
582 585 b'experimental', b'extra-filter-revs', default=None,
583 586 )
584 587 coreconfigitem(
585 588 b'experimental', b'maxdeltachainspan', default=-1,
586 589 )
587 590 coreconfigitem(
588 591 b'experimental', b'mergetempdirprefix', default=None,
589 592 )
590 593 coreconfigitem(
591 594 b'experimental', b'mmapindexthreshold', default=None,
592 595 )
593 596 coreconfigitem(
594 597 b'experimental', b'narrow', default=False,
595 598 )
596 599 coreconfigitem(
597 600 b'experimental', b'nonnormalparanoidcheck', default=False,
598 601 )
599 602 coreconfigitem(
600 603 b'experimental', b'exportableenviron', default=list,
601 604 )
602 605 coreconfigitem(
603 606 b'experimental', b'extendedheader.index', default=None,
604 607 )
605 608 coreconfigitem(
606 609 b'experimental', b'extendedheader.similarity', default=False,
607 610 )
608 611 coreconfigitem(
609 612 b'experimental', b'graphshorten', default=False,
610 613 )
611 614 coreconfigitem(
612 615 b'experimental', b'graphstyle.parent', default=dynamicdefault,
613 616 )
614 617 coreconfigitem(
615 618 b'experimental', b'graphstyle.missing', default=dynamicdefault,
616 619 )
617 620 coreconfigitem(
618 621 b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
619 622 )
620 623 coreconfigitem(
621 624 b'experimental', b'hook-track-tags', default=False,
622 625 )
623 626 coreconfigitem(
624 627 b'experimental', b'httppeer.advertise-v2', default=False,
625 628 )
626 629 coreconfigitem(
627 630 b'experimental', b'httppeer.v2-encoder-order', default=None,
628 631 )
629 632 coreconfigitem(
630 633 b'experimental', b'httppostargs', default=False,
631 634 )
632 635 coreconfigitem(
633 636 b'experimental', b'mergedriver', default=None,
634 637 )
635 638 coreconfigitem(b'experimental', b'nointerrupt', default=False)
636 639 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
637 640
638 641 coreconfigitem(
639 642 b'experimental', b'obsmarkers-exchange-debug', default=False,
640 643 )
641 644 coreconfigitem(
642 645 b'experimental', b'remotenames', default=False,
643 646 )
644 647 coreconfigitem(
645 648 b'experimental', b'removeemptydirs', default=True,
646 649 )
647 650 coreconfigitem(
648 651 b'experimental', b'revert.interactive.select-to-keep', default=False,
649 652 )
650 653 coreconfigitem(
651 654 b'experimental', b'revisions.prefixhexnode', default=False,
652 655 )
653 656 coreconfigitem(
654 657 b'experimental', b'revlogv2', default=None,
655 658 )
656 659 coreconfigitem(
657 660 b'experimental', b'revisions.disambiguatewithin', default=None,
658 661 )
659 662 coreconfigitem(
660 663 b'experimental', b'rust.index', default=False,
661 664 )
662 665 coreconfigitem(
663 666 b'experimental', b'exp-persistent-nodemap', default=False,
664 667 )
665 668 coreconfigitem(
666 669 b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
667 670 )
668 671 coreconfigitem(
669 672 b'experimental',
670 673 b'server.manifestdata.recommended-batch-size',
671 674 default=100000,
672 675 )
673 676 coreconfigitem(
674 677 b'experimental', b'server.stream-narrow-clones', default=False,
675 678 )
676 679 coreconfigitem(
677 680 b'experimental', b'single-head-per-branch', default=False,
678 681 )
679 682 coreconfigitem(
680 683 b'experimental',
681 684 b'single-head-per-branch:account-closed-heads',
682 685 default=False,
683 686 )
684 687 coreconfigitem(
685 688 b'experimental', b'sshserver.support-v2', default=False,
686 689 )
687 690 coreconfigitem(
688 691 b'experimental', b'sparse-read', default=False,
689 692 )
690 693 coreconfigitem(
691 694 b'experimental', b'sparse-read.density-threshold', default=0.50,
692 695 )
693 696 coreconfigitem(
694 697 b'experimental', b'sparse-read.min-gap-size', default=b'65K',
695 698 )
696 699 coreconfigitem(
697 700 b'experimental', b'treemanifest', default=False,
698 701 )
699 702 coreconfigitem(
700 703 b'experimental', b'update.atomic-file', default=False,
701 704 )
702 705 coreconfigitem(
703 706 b'experimental', b'sshpeer.advertise-v2', default=False,
704 707 )
705 708 coreconfigitem(
706 709 b'experimental', b'web.apiserver', default=False,
707 710 )
708 711 coreconfigitem(
709 712 b'experimental', b'web.api.http-v2', default=False,
710 713 )
711 714 coreconfigitem(
712 715 b'experimental', b'web.api.debugreflect', default=False,
713 716 )
714 717 coreconfigitem(
715 718 b'experimental', b'worker.wdir-get-thread-safe', default=False,
716 719 )
717 720 coreconfigitem(
718 721 b'experimental', b'worker.repository-upgrade', default=False,
719 722 )
720 723 coreconfigitem(
721 724 b'experimental', b'xdiff', default=False,
722 725 )
723 726 coreconfigitem(
724 727 b'extensions', b'.*', default=None, generic=True,
725 728 )
726 729 coreconfigitem(
727 730 b'extdata', b'.*', default=None, generic=True,
728 731 )
729 732 coreconfigitem(
730 733 b'format', b'bookmarks-in-store', default=False,
731 734 )
732 735 coreconfigitem(
733 736 b'format', b'chunkcachesize', default=None, experimental=True,
734 737 )
735 738 coreconfigitem(
736 739 b'format', b'dotencode', default=True,
737 740 )
738 741 coreconfigitem(
739 742 b'format', b'generaldelta', default=False, experimental=True,
740 743 )
741 744 coreconfigitem(
742 745 b'format', b'manifestcachesize', default=None, experimental=True,
743 746 )
744 747 coreconfigitem(
745 748 b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
746 749 )
747 750 coreconfigitem(
748 751 b'format', b'obsstore-version', default=None,
749 752 )
750 753 coreconfigitem(
751 754 b'format', b'sparse-revlog', default=True,
752 755 )
753 756 coreconfigitem(
754 757 b'format',
755 758 b'revlog-compression',
756 759 default=b'zlib',
757 760 alias=[(b'experimental', b'format.compression')],
758 761 )
759 762 coreconfigitem(
760 763 b'format', b'usefncache', default=True,
761 764 )
762 765 coreconfigitem(
763 766 b'format', b'usegeneraldelta', default=True,
764 767 )
765 768 coreconfigitem(
766 769 b'format', b'usestore', default=True,
767 770 )
768 771 coreconfigitem(
769 772 b'format',
770 773 b'exp-use-copies-side-data-changeset',
771 774 default=False,
772 775 experimental=True,
773 776 )
774 777 coreconfigitem(
775 778 b'format', b'exp-use-side-data', default=False, experimental=True,
776 779 )
777 780 coreconfigitem(
778 781 b'format', b'internal-phase', default=False, experimental=True,
779 782 )
780 783 coreconfigitem(
781 784 b'fsmonitor', b'warn_when_unused', default=True,
782 785 )
783 786 coreconfigitem(
784 787 b'fsmonitor', b'warn_update_file_count', default=50000,
785 788 )
786 789 coreconfigitem(
787 790 b'help', br'hidden-command\..*', default=False, generic=True,
788 791 )
789 792 coreconfigitem(
790 793 b'help', br'hidden-topic\..*', default=False, generic=True,
791 794 )
792 795 coreconfigitem(
793 796 b'hooks', b'.*', default=dynamicdefault, generic=True,
794 797 )
795 798 coreconfigitem(
796 799 b'hgweb-paths', b'.*', default=list, generic=True,
797 800 )
798 801 coreconfigitem(
799 802 b'hostfingerprints', b'.*', default=list, generic=True,
800 803 )
801 804 coreconfigitem(
802 805 b'hostsecurity', b'ciphers', default=None,
803 806 )
804 807 coreconfigitem(
805 808 b'hostsecurity', b'disabletls10warning', default=False,
806 809 )
807 810 coreconfigitem(
808 811 b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
809 812 )
810 813 coreconfigitem(
811 814 b'hostsecurity',
812 815 b'.*:minimumprotocol$',
813 816 default=dynamicdefault,
814 817 generic=True,
815 818 )
816 819 coreconfigitem(
817 820 b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
818 821 )
819 822 coreconfigitem(
820 823 b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
821 824 )
822 825 coreconfigitem(
823 826 b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
824 827 )
825 828
826 829 coreconfigitem(
827 830 b'http_proxy', b'always', default=False,
828 831 )
829 832 coreconfigitem(
830 833 b'http_proxy', b'host', default=None,
831 834 )
832 835 coreconfigitem(
833 836 b'http_proxy', b'no', default=list,
834 837 )
835 838 coreconfigitem(
836 839 b'http_proxy', b'passwd', default=None,
837 840 )
838 841 coreconfigitem(
839 842 b'http_proxy', b'user', default=None,
840 843 )
841 844
842 845 coreconfigitem(
843 846 b'http', b'timeout', default=None,
844 847 )
845 848
846 849 coreconfigitem(
847 850 b'logtoprocess', b'commandexception', default=None,
848 851 )
849 852 coreconfigitem(
850 853 b'logtoprocess', b'commandfinish', default=None,
851 854 )
852 855 coreconfigitem(
853 856 b'logtoprocess', b'command', default=None,
854 857 )
855 858 coreconfigitem(
856 859 b'logtoprocess', b'develwarn', default=None,
857 860 )
858 861 coreconfigitem(
859 862 b'logtoprocess', b'uiblocked', default=None,
860 863 )
861 864 coreconfigitem(
862 865 b'merge', b'checkunknown', default=b'abort',
863 866 )
864 867 coreconfigitem(
865 868 b'merge', b'checkignored', default=b'abort',
866 869 )
867 870 coreconfigitem(
868 871 b'experimental', b'merge.checkpathconflicts', default=False,
869 872 )
870 873 coreconfigitem(
871 874 b'merge', b'followcopies', default=True,
872 875 )
873 876 coreconfigitem(
874 877 b'merge', b'on-failure', default=b'continue',
875 878 )
876 879 coreconfigitem(
877 880 b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
878 881 )
879 882 coreconfigitem(
880 883 b'merge', b'strict-capability-check', default=False,
881 884 )
882 885 coreconfigitem(
883 886 b'merge-tools', b'.*', default=None, generic=True,
884 887 )
885 888 coreconfigitem(
886 889 b'merge-tools',
887 890 br'.*\.args$',
888 891 default=b"$local $base $other",
889 892 generic=True,
890 893 priority=-1,
891 894 )
892 895 coreconfigitem(
893 896 b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
894 897 )
895 898 coreconfigitem(
896 899 b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
897 900 )
898 901 coreconfigitem(
899 902 b'merge-tools',
900 903 br'.*\.checkchanged$',
901 904 default=False,
902 905 generic=True,
903 906 priority=-1,
904 907 )
905 908 coreconfigitem(
906 909 b'merge-tools',
907 910 br'.*\.executable$',
908 911 default=dynamicdefault,
909 912 generic=True,
910 913 priority=-1,
911 914 )
912 915 coreconfigitem(
913 916 b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
914 917 )
915 918 coreconfigitem(
916 919 b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
917 920 )
918 921 coreconfigitem(
919 922 b'merge-tools',
920 923 br'.*\.mergemarkers$',
921 924 default=b'basic',
922 925 generic=True,
923 926 priority=-1,
924 927 )
925 928 coreconfigitem(
926 929 b'merge-tools',
927 930 br'.*\.mergemarkertemplate$',
928 931 default=dynamicdefault, # take from ui.mergemarkertemplate
929 932 generic=True,
930 933 priority=-1,
931 934 )
932 935 coreconfigitem(
933 936 b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
934 937 )
935 938 coreconfigitem(
936 939 b'merge-tools',
937 940 br'.*\.premerge$',
938 941 default=dynamicdefault,
939 942 generic=True,
940 943 priority=-1,
941 944 )
942 945 coreconfigitem(
943 946 b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
944 947 )
945 948 coreconfigitem(
946 949 b'pager', b'attend-.*', default=dynamicdefault, generic=True,
947 950 )
948 951 coreconfigitem(
949 952 b'pager', b'ignore', default=list,
950 953 )
951 954 coreconfigitem(
952 955 b'pager', b'pager', default=dynamicdefault,
953 956 )
954 957 coreconfigitem(
955 958 b'patch', b'eol', default=b'strict',
956 959 )
957 960 coreconfigitem(
958 961 b'patch', b'fuzz', default=2,
959 962 )
960 963 coreconfigitem(
961 964 b'paths', b'default', default=None,
962 965 )
963 966 coreconfigitem(
964 967 b'paths', b'default-push', default=None,
965 968 )
966 969 coreconfigitem(
967 970 b'paths', b'.*', default=None, generic=True,
968 971 )
969 972 coreconfigitem(
970 973 b'phases', b'checksubrepos', default=b'follow',
971 974 )
972 975 coreconfigitem(
973 976 b'phases', b'new-commit', default=b'draft',
974 977 )
975 978 coreconfigitem(
976 979 b'phases', b'publish', default=True,
977 980 )
978 981 coreconfigitem(
979 982 b'profiling', b'enabled', default=False,
980 983 )
981 984 coreconfigitem(
982 985 b'profiling', b'format', default=b'text',
983 986 )
984 987 coreconfigitem(
985 988 b'profiling', b'freq', default=1000,
986 989 )
987 990 coreconfigitem(
988 991 b'profiling', b'limit', default=30,
989 992 )
990 993 coreconfigitem(
991 994 b'profiling', b'nested', default=0,
992 995 )
993 996 coreconfigitem(
994 997 b'profiling', b'output', default=None,
995 998 )
996 999 coreconfigitem(
997 1000 b'profiling', b'showmax', default=0.999,
998 1001 )
999 1002 coreconfigitem(
1000 1003 b'profiling', b'showmin', default=dynamicdefault,
1001 1004 )
1002 1005 coreconfigitem(
1003 1006 b'profiling', b'showtime', default=True,
1004 1007 )
1005 1008 coreconfigitem(
1006 1009 b'profiling', b'sort', default=b'inlinetime',
1007 1010 )
1008 1011 coreconfigitem(
1009 1012 b'profiling', b'statformat', default=b'hotpath',
1010 1013 )
1011 1014 coreconfigitem(
1012 1015 b'profiling', b'time-track', default=dynamicdefault,
1013 1016 )
1014 1017 coreconfigitem(
1015 1018 b'profiling', b'type', default=b'stat',
1016 1019 )
1017 1020 coreconfigitem(
1018 1021 b'progress', b'assume-tty', default=False,
1019 1022 )
1020 1023 coreconfigitem(
1021 1024 b'progress', b'changedelay', default=1,
1022 1025 )
1023 1026 coreconfigitem(
1024 1027 b'progress', b'clear-complete', default=True,
1025 1028 )
1026 1029 coreconfigitem(
1027 1030 b'progress', b'debug', default=False,
1028 1031 )
1029 1032 coreconfigitem(
1030 1033 b'progress', b'delay', default=3,
1031 1034 )
1032 1035 coreconfigitem(
1033 1036 b'progress', b'disable', default=False,
1034 1037 )
1035 1038 coreconfigitem(
1036 1039 b'progress', b'estimateinterval', default=60.0,
1037 1040 )
1038 1041 coreconfigitem(
1039 1042 b'progress',
1040 1043 b'format',
1041 1044 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1042 1045 )
1043 1046 coreconfigitem(
1044 1047 b'progress', b'refresh', default=0.1,
1045 1048 )
1046 1049 coreconfigitem(
1047 1050 b'progress', b'width', default=dynamicdefault,
1048 1051 )
1049 1052 coreconfigitem(
1050 1053 b'push', b'pushvars.server', default=False,
1051 1054 )
1052 1055 coreconfigitem(
1053 1056 b'rewrite',
1054 1057 b'backup-bundle',
1055 1058 default=True,
1056 1059 alias=[(b'ui', b'history-editing-backup')],
1057 1060 )
1058 1061 coreconfigitem(
1059 1062 b'rewrite', b'update-timestamp', default=False,
1060 1063 )
1061 1064 coreconfigitem(
1062 1065 b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
1063 1066 )
1064 1067 coreconfigitem(
1065 1068 b'storage',
1066 1069 b'revlog.optimize-delta-parent-choice',
1067 1070 default=True,
1068 1071 alias=[(b'format', b'aggressivemergedeltas')],
1069 1072 )
1070 1073 coreconfigitem(
1071 1074 b'storage', b'revlog.reuse-external-delta', default=True,
1072 1075 )
1073 1076 coreconfigitem(
1074 1077 b'storage', b'revlog.reuse-external-delta-parent', default=None,
1075 1078 )
1076 1079 coreconfigitem(
1077 1080 b'storage', b'revlog.zlib.level', default=None,
1078 1081 )
1079 1082 coreconfigitem(
1080 1083 b'storage', b'revlog.zstd.level', default=None,
1081 1084 )
1082 1085 coreconfigitem(
1083 1086 b'server', b'bookmarks-pushkey-compat', default=True,
1084 1087 )
1085 1088 coreconfigitem(
1086 1089 b'server', b'bundle1', default=True,
1087 1090 )
1088 1091 coreconfigitem(
1089 1092 b'server', b'bundle1gd', default=None,
1090 1093 )
1091 1094 coreconfigitem(
1092 1095 b'server', b'bundle1.pull', default=None,
1093 1096 )
1094 1097 coreconfigitem(
1095 1098 b'server', b'bundle1gd.pull', default=None,
1096 1099 )
1097 1100 coreconfigitem(
1098 1101 b'server', b'bundle1.push', default=None,
1099 1102 )
1100 1103 coreconfigitem(
1101 1104 b'server', b'bundle1gd.push', default=None,
1102 1105 )
1103 1106 coreconfigitem(
1104 1107 b'server',
1105 1108 b'bundle2.stream',
1106 1109 default=True,
1107 1110 alias=[(b'experimental', b'bundle2.stream')],
1108 1111 )
1109 1112 coreconfigitem(
1110 1113 b'server', b'compressionengines', default=list,
1111 1114 )
1112 1115 coreconfigitem(
1113 1116 b'server', b'concurrent-push-mode', default=b'strict',
1114 1117 )
1115 1118 coreconfigitem(
1116 1119 b'server', b'disablefullbundle', default=False,
1117 1120 )
1118 1121 coreconfigitem(
1119 1122 b'server', b'maxhttpheaderlen', default=1024,
1120 1123 )
1121 1124 coreconfigitem(
1122 1125 b'server', b'pullbundle', default=False,
1123 1126 )
1124 1127 coreconfigitem(
1125 1128 b'server', b'preferuncompressed', default=False,
1126 1129 )
1127 1130 coreconfigitem(
1128 1131 b'server', b'streamunbundle', default=False,
1129 1132 )
1130 1133 coreconfigitem(
1131 1134 b'server', b'uncompressed', default=True,
1132 1135 )
1133 1136 coreconfigitem(
1134 1137 b'server', b'uncompressedallowsecret', default=False,
1135 1138 )
1136 1139 coreconfigitem(
1137 1140 b'server', b'view', default=b'served',
1138 1141 )
1139 1142 coreconfigitem(
1140 1143 b'server', b'validate', default=False,
1141 1144 )
1142 1145 coreconfigitem(
1143 1146 b'server', b'zliblevel', default=-1,
1144 1147 )
1145 1148 coreconfigitem(
1146 1149 b'server', b'zstdlevel', default=3,
1147 1150 )
1148 1151 coreconfigitem(
1149 1152 b'share', b'pool', default=None,
1150 1153 )
1151 1154 coreconfigitem(
1152 1155 b'share', b'poolnaming', default=b'identity',
1153 1156 )
1154 1157 coreconfigitem(
1155 1158 b'shelve', b'maxbackups', default=10,
1156 1159 )
1157 1160 coreconfigitem(
1158 1161 b'smtp', b'host', default=None,
1159 1162 )
1160 1163 coreconfigitem(
1161 1164 b'smtp', b'local_hostname', default=None,
1162 1165 )
1163 1166 coreconfigitem(
1164 1167 b'smtp', b'password', default=None,
1165 1168 )
1166 1169 coreconfigitem(
1167 1170 b'smtp', b'port', default=dynamicdefault,
1168 1171 )
1169 1172 coreconfigitem(
1170 1173 b'smtp', b'tls', default=b'none',
1171 1174 )
1172 1175 coreconfigitem(
1173 1176 b'smtp', b'username', default=None,
1174 1177 )
1175 1178 coreconfigitem(
1176 1179 b'sparse', b'missingwarning', default=True, experimental=True,
1177 1180 )
1178 1181 coreconfigitem(
1179 1182 b'subrepos',
1180 1183 b'allowed',
1181 1184 default=dynamicdefault, # to make backporting simpler
1182 1185 )
1183 1186 coreconfigitem(
1184 1187 b'subrepos', b'hg:allowed', default=dynamicdefault,
1185 1188 )
1186 1189 coreconfigitem(
1187 1190 b'subrepos', b'git:allowed', default=dynamicdefault,
1188 1191 )
1189 1192 coreconfigitem(
1190 1193 b'subrepos', b'svn:allowed', default=dynamicdefault,
1191 1194 )
1192 1195 coreconfigitem(
1193 1196 b'templates', b'.*', default=None, generic=True,
1194 1197 )
1195 1198 coreconfigitem(
1196 1199 b'templateconfig', b'.*', default=dynamicdefault, generic=True,
1197 1200 )
1198 1201 coreconfigitem(
1199 1202 b'trusted', b'groups', default=list,
1200 1203 )
1201 1204 coreconfigitem(
1202 1205 b'trusted', b'users', default=list,
1203 1206 )
1204 1207 coreconfigitem(
1205 1208 b'ui', b'_usedassubrepo', default=False,
1206 1209 )
1207 1210 coreconfigitem(
1208 1211 b'ui', b'allowemptycommit', default=False,
1209 1212 )
1210 1213 coreconfigitem(
1211 1214 b'ui', b'archivemeta', default=True,
1212 1215 )
1213 1216 coreconfigitem(
1214 1217 b'ui', b'askusername', default=False,
1215 1218 )
1216 1219 coreconfigitem(
1217 1220 b'ui', b'clonebundlefallback', default=False,
1218 1221 )
1219 1222 coreconfigitem(
1220 1223 b'ui', b'clonebundleprefers', default=list,
1221 1224 )
1222 1225 coreconfigitem(
1223 1226 b'ui', b'clonebundles', default=True,
1224 1227 )
1225 1228 coreconfigitem(
1226 1229 b'ui', b'color', default=b'auto',
1227 1230 )
1228 1231 coreconfigitem(
1229 1232 b'ui', b'commitsubrepos', default=False,
1230 1233 )
1231 1234 coreconfigitem(
1232 1235 b'ui', b'debug', default=False,
1233 1236 )
1234 1237 coreconfigitem(
1235 1238 b'ui', b'debugger', default=None,
1236 1239 )
1237 1240 coreconfigitem(
1238 1241 b'ui', b'editor', default=dynamicdefault,
1239 1242 )
1240 1243 coreconfigitem(
1241 1244 b'ui', b'fallbackencoding', default=None,
1242 1245 )
1243 1246 coreconfigitem(
1244 1247 b'ui', b'forcecwd', default=None,
1245 1248 )
1246 1249 coreconfigitem(
1247 1250 b'ui', b'forcemerge', default=None,
1248 1251 )
1249 1252 coreconfigitem(
1250 1253 b'ui', b'formatdebug', default=False,
1251 1254 )
1252 1255 coreconfigitem(
1253 1256 b'ui', b'formatjson', default=False,
1254 1257 )
1255 1258 coreconfigitem(
1256 1259 b'ui', b'formatted', default=None,
1257 1260 )
1258 1261 coreconfigitem(
1259 1262 b'ui', b'graphnodetemplate', default=None,
1260 1263 )
1261 1264 coreconfigitem(
1262 1265 b'ui', b'interactive', default=None,
1263 1266 )
1264 1267 coreconfigitem(
1265 1268 b'ui', b'interface', default=None,
1266 1269 )
1267 1270 coreconfigitem(
1268 1271 b'ui', b'interface.chunkselector', default=None,
1269 1272 )
1270 1273 coreconfigitem(
1271 1274 b'ui', b'large-file-limit', default=10000000,
1272 1275 )
1273 1276 coreconfigitem(
1274 1277 b'ui', b'logblockedtimes', default=False,
1275 1278 )
1276 1279 coreconfigitem(
1277 1280 b'ui', b'logtemplate', default=None,
1278 1281 )
1279 1282 coreconfigitem(
1280 1283 b'ui', b'merge', default=None,
1281 1284 )
1282 1285 coreconfigitem(
1283 1286 b'ui', b'mergemarkers', default=b'basic',
1284 1287 )
1285 1288 coreconfigitem(
1286 1289 b'ui',
1287 1290 b'mergemarkertemplate',
1288 1291 default=(
1289 1292 b'{node|short} '
1290 1293 b'{ifeq(tags, "tip", "", '
1291 1294 b'ifeq(tags, "", "", "{tags} "))}'
1292 1295 b'{if(bookmarks, "{bookmarks} ")}'
1293 1296 b'{ifeq(branch, "default", "", "{branch} ")}'
1294 1297 b'- {author|user}: {desc|firstline}'
1295 1298 ),
1296 1299 )
1297 1300 coreconfigitem(
1298 1301 b'ui', b'message-output', default=b'stdio',
1299 1302 )
1300 1303 coreconfigitem(
1301 1304 b'ui', b'nontty', default=False,
1302 1305 )
1303 1306 coreconfigitem(
1304 1307 b'ui', b'origbackuppath', default=None,
1305 1308 )
1306 1309 coreconfigitem(
1307 1310 b'ui', b'paginate', default=True,
1308 1311 )
1309 1312 coreconfigitem(
1310 1313 b'ui', b'patch', default=None,
1311 1314 )
1312 1315 coreconfigitem(
1313 1316 b'ui', b'pre-merge-tool-output-template', default=None,
1314 1317 )
1315 1318 coreconfigitem(
1316 1319 b'ui', b'portablefilenames', default=b'warn',
1317 1320 )
1318 1321 coreconfigitem(
1319 1322 b'ui', b'promptecho', default=False,
1320 1323 )
1321 1324 coreconfigitem(
1322 1325 b'ui', b'quiet', default=False,
1323 1326 )
1324 1327 coreconfigitem(
1325 1328 b'ui', b'quietbookmarkmove', default=False,
1326 1329 )
1327 1330 coreconfigitem(
1328 1331 b'ui', b'relative-paths', default=b'legacy',
1329 1332 )
1330 1333 coreconfigitem(
1331 1334 b'ui', b'remotecmd', default=b'hg',
1332 1335 )
1333 1336 coreconfigitem(
1334 1337 b'ui', b'report_untrusted', default=True,
1335 1338 )
1336 1339 coreconfigitem(
1337 1340 b'ui', b'rollback', default=True,
1338 1341 )
1339 1342 coreconfigitem(
1340 1343 b'ui', b'signal-safe-lock', default=True,
1341 1344 )
1342 1345 coreconfigitem(
1343 1346 b'ui', b'slash', default=False,
1344 1347 )
1345 1348 coreconfigitem(
1346 1349 b'ui', b'ssh', default=b'ssh',
1347 1350 )
1348 1351 coreconfigitem(
1349 1352 b'ui', b'ssherrorhint', default=None,
1350 1353 )
1351 1354 coreconfigitem(
1352 1355 b'ui', b'statuscopies', default=False,
1353 1356 )
1354 1357 coreconfigitem(
1355 1358 b'ui', b'strict', default=False,
1356 1359 )
1357 1360 coreconfigitem(
1358 1361 b'ui', b'style', default=b'',
1359 1362 )
1360 1363 coreconfigitem(
1361 1364 b'ui', b'supportcontact', default=None,
1362 1365 )
1363 1366 coreconfigitem(
1364 1367 b'ui', b'textwidth', default=78,
1365 1368 )
1366 1369 coreconfigitem(
1367 1370 b'ui', b'timeout', default=b'600',
1368 1371 )
1369 1372 coreconfigitem(
1370 1373 b'ui', b'timeout.warn', default=0,
1371 1374 )
1372 1375 coreconfigitem(
1373 1376 b'ui', b'traceback', default=False,
1374 1377 )
1375 1378 coreconfigitem(
1376 1379 b'ui', b'tweakdefaults', default=False,
1377 1380 )
1378 1381 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
1379 1382 coreconfigitem(
1380 1383 b'ui', b'verbose', default=False,
1381 1384 )
1382 1385 coreconfigitem(
1383 1386 b'verify', b'skipflags', default=None,
1384 1387 )
1385 1388 coreconfigitem(
1386 1389 b'web', b'allowbz2', default=False,
1387 1390 )
1388 1391 coreconfigitem(
1389 1392 b'web', b'allowgz', default=False,
1390 1393 )
1391 1394 coreconfigitem(
1392 1395 b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
1393 1396 )
1394 1397 coreconfigitem(
1395 1398 b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
1396 1399 )
1397 1400 coreconfigitem(
1398 1401 b'web', b'allowzip', default=False,
1399 1402 )
1400 1403 coreconfigitem(
1401 1404 b'web', b'archivesubrepos', default=False,
1402 1405 )
1403 1406 coreconfigitem(
1404 1407 b'web', b'cache', default=True,
1405 1408 )
1406 1409 coreconfigitem(
1407 1410 b'web', b'comparisoncontext', default=5,
1408 1411 )
1409 1412 coreconfigitem(
1410 1413 b'web', b'contact', default=None,
1411 1414 )
1412 1415 coreconfigitem(
1413 1416 b'web', b'deny_push', default=list,
1414 1417 )
1415 1418 coreconfigitem(
1416 1419 b'web', b'guessmime', default=False,
1417 1420 )
1418 1421 coreconfigitem(
1419 1422 b'web', b'hidden', default=False,
1420 1423 )
1421 1424 coreconfigitem(
1422 1425 b'web', b'labels', default=list,
1423 1426 )
1424 1427 coreconfigitem(
1425 1428 b'web', b'logoimg', default=b'hglogo.png',
1426 1429 )
1427 1430 coreconfigitem(
1428 1431 b'web', b'logourl', default=b'https://mercurial-scm.org/',
1429 1432 )
1430 1433 coreconfigitem(
1431 1434 b'web', b'accesslog', default=b'-',
1432 1435 )
1433 1436 coreconfigitem(
1434 1437 b'web', b'address', default=b'',
1435 1438 )
1436 1439 coreconfigitem(
1437 1440 b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
1438 1441 )
1439 1442 coreconfigitem(
1440 1443 b'web', b'allow_read', default=list,
1441 1444 )
1442 1445 coreconfigitem(
1443 1446 b'web', b'baseurl', default=None,
1444 1447 )
1445 1448 coreconfigitem(
1446 1449 b'web', b'cacerts', default=None,
1447 1450 )
1448 1451 coreconfigitem(
1449 1452 b'web', b'certificate', default=None,
1450 1453 )
1451 1454 coreconfigitem(
1452 1455 b'web', b'collapse', default=False,
1453 1456 )
1454 1457 coreconfigitem(
1455 1458 b'web', b'csp', default=None,
1456 1459 )
1457 1460 coreconfigitem(
1458 1461 b'web', b'deny_read', default=list,
1459 1462 )
1460 1463 coreconfigitem(
1461 1464 b'web', b'descend', default=True,
1462 1465 )
1463 1466 coreconfigitem(
1464 1467 b'web', b'description', default=b"",
1465 1468 )
1466 1469 coreconfigitem(
1467 1470 b'web', b'encoding', default=lambda: encoding.encoding,
1468 1471 )
1469 1472 coreconfigitem(
1470 1473 b'web', b'errorlog', default=b'-',
1471 1474 )
1472 1475 coreconfigitem(
1473 1476 b'web', b'ipv6', default=False,
1474 1477 )
1475 1478 coreconfigitem(
1476 1479 b'web', b'maxchanges', default=10,
1477 1480 )
1478 1481 coreconfigitem(
1479 1482 b'web', b'maxfiles', default=10,
1480 1483 )
1481 1484 coreconfigitem(
1482 1485 b'web', b'maxshortchanges', default=60,
1483 1486 )
1484 1487 coreconfigitem(
1485 1488 b'web', b'motd', default=b'',
1486 1489 )
1487 1490 coreconfigitem(
1488 1491 b'web', b'name', default=dynamicdefault,
1489 1492 )
1490 1493 coreconfigitem(
1491 1494 b'web', b'port', default=8000,
1492 1495 )
1493 1496 coreconfigitem(
1494 1497 b'web', b'prefix', default=b'',
1495 1498 )
1496 1499 coreconfigitem(
1497 1500 b'web', b'push_ssl', default=True,
1498 1501 )
1499 1502 coreconfigitem(
1500 1503 b'web', b'refreshinterval', default=20,
1501 1504 )
1502 1505 coreconfigitem(
1503 1506 b'web', b'server-header', default=None,
1504 1507 )
1505 1508 coreconfigitem(
1506 1509 b'web', b'static', default=None,
1507 1510 )
1508 1511 coreconfigitem(
1509 1512 b'web', b'staticurl', default=None,
1510 1513 )
1511 1514 coreconfigitem(
1512 1515 b'web', b'stripes', default=1,
1513 1516 )
1514 1517 coreconfigitem(
1515 1518 b'web', b'style', default=b'paper',
1516 1519 )
1517 1520 coreconfigitem(
1518 1521 b'web', b'templates', default=None,
1519 1522 )
1520 1523 coreconfigitem(
1521 1524 b'web', b'view', default=b'served', experimental=True,
1522 1525 )
1523 1526 coreconfigitem(
1524 1527 b'worker', b'backgroundclose', default=dynamicdefault,
1525 1528 )
1526 1529 # Windows defaults to a limit of 512 open files. A buffer of 128
1527 1530 # should give us enough headway.
1528 1531 coreconfigitem(
1529 1532 b'worker', b'backgroundclosemaxqueue', default=384,
1530 1533 )
1531 1534 coreconfigitem(
1532 1535 b'worker', b'backgroundcloseminfilecount', default=2048,
1533 1536 )
1534 1537 coreconfigitem(
1535 1538 b'worker', b'backgroundclosethreadcount', default=4,
1536 1539 )
1537 1540 coreconfigitem(
1538 1541 b'worker', b'enabled', default=True,
1539 1542 )
1540 1543 coreconfigitem(
1541 1544 b'worker', b'numcpus', default=None,
1542 1545 )
1543 1546
1544 1547 # Rebase related configuration moved to core because other extension are doing
1545 1548 # strange things. For example, shelve import the extensions to reuse some bit
1546 1549 # without formally loading it.
1547 1550 coreconfigitem(
1548 1551 b'commands', b'rebase.requiredest', default=False,
1549 1552 )
1550 1553 coreconfigitem(
1551 1554 b'experimental', b'rebaseskipobsolete', default=True,
1552 1555 )
1553 1556 coreconfigitem(
1554 1557 b'rebase', b'singletransaction', default=False,
1555 1558 )
1556 1559 coreconfigitem(
1557 1560 b'rebase', b'experimental.inmemory', default=False,
1558 1561 )
@@ -1,3789 +1,3791 @@ mercurial/localrepo.py
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 context,
36 36 dirstate,
37 37 dirstateguard,
38 38 discovery,
39 39 encoding,
40 40 error,
41 41 exchange,
42 42 extensions,
43 43 filelog,
44 44 hook,
45 45 lock as lockmod,
46 46 match as matchmod,
47 47 merge as mergemod,
48 48 mergeutil,
49 49 namespaces,
50 50 narrowspec,
51 51 obsolete,
52 52 pathutil,
53 53 phases,
54 54 pushkey,
55 55 pycompat,
56 56 rcutil,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store as storemod,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70
71 71 from .interfaces import (
72 72 repository,
73 73 util as interfaceutil,
74 74 )
75 75
76 76 from .utils import (
77 77 hashutil,
78 78 procutil,
79 79 stringutil,
80 80 )
81 81
82 82 from .revlogutils import constants as revlogconst
83 83
84 84 release = lockmod.release
85 85 urlerr = util.urlerr
86 86 urlreq = util.urlreq
87 87
88 88 # set of (path, vfs-location) tuples. vfs-location is:
89 89 # - 'plain for vfs relative paths
90 90 # - '' for svfs relative paths
91 91 _cachedfiles = set()
92 92
93 93
94 94 class _basefilecache(scmutil.filecache):
95 95 """All filecache usage on repo are done for logic that should be unfiltered
96 96 """
97 97
98 98 def __get__(self, repo, type=None):
99 99 if repo is None:
100 100 return self
101 101 # proxy to unfiltered __dict__ since filtered repo has no entry
102 102 unfi = repo.unfiltered()
103 103 try:
104 104 return unfi.__dict__[self.sname]
105 105 except KeyError:
106 106 pass
107 107 return super(_basefilecache, self).__get__(unfi, type)
108 108
109 109 def set(self, repo, value):
110 110 return super(_basefilecache, self).set(repo.unfiltered(), value)
111 111
112 112
113 113 class repofilecache(_basefilecache):
114 114 """filecache for files in .hg but outside of .hg/store"""
115 115
116 116 def __init__(self, *paths):
117 117 super(repofilecache, self).__init__(*paths)
118 118 for path in paths:
119 119 _cachedfiles.add((path, b'plain'))
120 120
121 121 def join(self, obj, fname):
122 122 return obj.vfs.join(fname)
123 123
124 124
125 125 class storecache(_basefilecache):
126 126 """filecache for files in the store"""
127 127
128 128 def __init__(self, *paths):
129 129 super(storecache, self).__init__(*paths)
130 130 for path in paths:
131 131 _cachedfiles.add((path, b''))
132 132
133 133 def join(self, obj, fname):
134 134 return obj.sjoin(fname)
135 135
136 136
137 137 class mixedrepostorecache(_basefilecache):
138 138 """filecache for a mix files in .hg/store and outside"""
139 139
140 140 def __init__(self, *pathsandlocations):
141 141 # scmutil.filecache only uses the path for passing back into our
142 142 # join(), so we can safely pass a list of paths and locations
143 143 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 144 _cachedfiles.update(pathsandlocations)
145 145
146 146 def join(self, obj, fnameandlocation):
147 147 fname, location = fnameandlocation
148 148 if location == b'plain':
149 149 return obj.vfs.join(fname)
150 150 else:
151 151 if location != b'':
152 152 raise error.ProgrammingError(
153 153 b'unexpected location: %s' % location
154 154 )
155 155 return obj.sjoin(fname)
156 156
157 157
158 158 def isfilecached(repo, name):
159 159 """check if a repo has already cached "name" filecache-ed property
160 160
161 161 This returns (cachedobj-or-None, iscached) tuple.
162 162 """
163 163 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 164 if not cacheentry:
165 165 return None, False
166 166 return cacheentry.obj, True
167 167
168 168
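The filecache flavours above differ only in how they join paths: repofilecache resolves against .hg (vfs), storecache against .hg/store (sjoin), and mixedrepostorecache dispatches per entry on the b'plain'/b'' location tag; isfilecached() then reports whether such a property has already been computed. A hedged sketch of using the helper (the cache key passed in is an assumption here, real keys are the names of the decorated properties on localrepository):

    def _cached_or_none(repo, name):
        # illustrative only: return the cached value of a filecache-ed
        # property, or None if it has not been computed yet
        obj, cached = isfilecached(repo, name)
        return obj if cached else None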
169 169 class unfilteredpropertycache(util.propertycache):
170 170 """propertycache that apply to unfiltered repo only"""
171 171
172 172 def __get__(self, repo, type=None):
173 173 unfi = repo.unfiltered()
174 174 if unfi is repo:
175 175 return super(unfilteredpropertycache, self).__get__(unfi)
176 176 return getattr(unfi, self.name)
177 177
178 178
179 179 class filteredpropertycache(util.propertycache):
180 180 """propertycache that must take filtering in account"""
181 181
182 182 def cachevalue(self, obj, value):
183 183 object.__setattr__(obj, self.name, value)
184 184
185 185
186 186 def hasunfilteredcache(repo, name):
187 187 """check if a repo has an unfilteredpropertycache value for <name>"""
188 188 return name in vars(repo.unfiltered())
189 189
190 190
191 191 def unfilteredmethod(orig):
192 192 """decorate method that always need to be run on unfiltered version"""
193 193
194 194 def wrapper(repo, *args, **kwargs):
195 195 return orig(repo.unfiltered(), *args, **kwargs)
196 196
197 197 return wrapper
198 198
199 199
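unfilteredmethod() simply re-targets the decorated method at repo.unfiltered(), so it always runs against the unfiltered repository regardless of which filtered view the caller holds. A self-contained sketch with a hypothetical stand-in class (only an unfiltered() method is needed for the decorator to work):

    class _examplerepo(object):
        def unfiltered(self):
            return self  # stand-in for localrepository.unfiltered()

        @unfilteredmethod
        def _examplemethod(self):
            return b'ran on the unfiltered view'

    assert _examplerepo()._examplemethod() == b'ran on the unfiltered view'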
200 200 moderncaps = {
201 201 b'lookup',
202 202 b'branchmap',
203 203 b'pushkey',
204 204 b'known',
205 205 b'getbundle',
206 206 b'unbundle',
207 207 }
208 208 legacycaps = moderncaps.union({b'changegroupsubset'})
209 209
210 210
211 211 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 212 class localcommandexecutor(object):
213 213 def __init__(self, peer):
214 214 self._peer = peer
215 215 self._sent = False
216 216 self._closed = False
217 217
218 218 def __enter__(self):
219 219 return self
220 220
221 221 def __exit__(self, exctype, excvalue, exctb):
222 222 self.close()
223 223
224 224 def callcommand(self, command, args):
225 225 if self._sent:
226 226 raise error.ProgrammingError(
227 227 b'callcommand() cannot be used after sendcommands()'
228 228 )
229 229
230 230 if self._closed:
231 231 raise error.ProgrammingError(
232 232 b'callcommand() cannot be used after close()'
233 233 )
234 234
235 235 # We don't need to support anything fancy. Just call the named
236 236 # method on the peer and return a resolved future.
237 237 fn = getattr(self._peer, pycompat.sysstr(command))
238 238
239 239 f = pycompat.futures.Future()
240 240
241 241 try:
242 242 result = fn(**pycompat.strkwargs(args))
243 243 except Exception:
244 244 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 245 else:
246 246 f.set_result(result)
247 247
248 248 return f
249 249
250 250 def sendcommands(self):
251 251 self._sent = True
252 252
253 253 def close(self):
254 254 self._closed = True
255 255
256 256
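localcommandexecutor implements the peer command-executor protocol for the in-process case: callcommand() looks the command up as a plain method on the peer and returns an already-resolved future, while sendcommands() and close() only flip flags. A hedged usage sketch (peer stands for a localpeer instance as defined just below; b'lookup' is one of the moderncaps commands above):

    def _lookup_tip(peer):
        # illustrative only; mirrors how callers drive a command executor
        with localcommandexecutor(peer) as executor:
            fut = executor.callcommand(b'lookup', {b'key': b'tip'})
            executor.sendcommands()
        return fut.result()  # resolved synchronously for local peers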
257 257 @interfaceutil.implementer(repository.ipeercommands)
258 258 class localpeer(repository.peer):
259 259 '''peer for a local repo; reflects only the most recent API'''
260 260
261 261 def __init__(self, repo, caps=None):
262 262 super(localpeer, self).__init__()
263 263
264 264 if caps is None:
265 265 caps = moderncaps.copy()
266 266 self._repo = repo.filtered(b'served')
267 267 self.ui = repo.ui
268 268 self._caps = repo._restrictcapabilities(caps)
269 269
270 270 # Begin of _basepeer interface.
271 271
272 272 def url(self):
273 273 return self._repo.url()
274 274
275 275 def local(self):
276 276 return self._repo
277 277
278 278 def peer(self):
279 279 return self
280 280
281 281 def canpush(self):
282 282 return True
283 283
284 284 def close(self):
285 285 self._repo.close()
286 286
287 287 # End of _basepeer interface.
288 288
289 289 # Begin of _basewirecommands interface.
290 290
291 291 def branchmap(self):
292 292 return self._repo.branchmap()
293 293
294 294 def capabilities(self):
295 295 return self._caps
296 296
297 297 def clonebundles(self):
298 298 return self._repo.tryread(b'clonebundles.manifest')
299 299
300 300 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 301 """Used to test argument passing over the wire"""
302 302 return b"%s %s %s %s %s" % (
303 303 one,
304 304 two,
305 305 pycompat.bytestr(three),
306 306 pycompat.bytestr(four),
307 307 pycompat.bytestr(five),
308 308 )
309 309
310 310 def getbundle(
311 311 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 312 ):
313 313 chunks = exchange.getbundlechunks(
314 314 self._repo,
315 315 source,
316 316 heads=heads,
317 317 common=common,
318 318 bundlecaps=bundlecaps,
319 319 **kwargs
320 320 )[1]
321 321 cb = util.chunkbuffer(chunks)
322 322
323 323 if exchange.bundle2requested(bundlecaps):
324 324 # When requesting a bundle2, getbundle returns a stream to make the
325 325 # wire level function happier. We need to build a proper object
326 326 # from it in the local peer.
327 327 return bundle2.getunbundler(self.ui, cb)
328 328 else:
329 329 return changegroup.getunbundler(b'01', cb, None)
330 330
331 331 def heads(self):
332 332 return self._repo.heads()
333 333
334 334 def known(self, nodes):
335 335 return self._repo.known(nodes)
336 336
337 337 def listkeys(self, namespace):
338 338 return self._repo.listkeys(namespace)
339 339
340 340 def lookup(self, key):
341 341 return self._repo.lookup(key)
342 342
343 343 def pushkey(self, namespace, key, old, new):
344 344 return self._repo.pushkey(namespace, key, old, new)
345 345
346 346 def stream_out(self):
347 347 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348 348
349 349 def unbundle(self, bundle, heads, url):
350 350 """apply a bundle on a repo
351 351
352 352 This function handles the repo locking itself."""
353 353 try:
354 354 try:
355 355 bundle = exchange.readbundle(self.ui, bundle, None)
356 356 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 357 if util.safehasattr(ret, b'getchunks'):
358 358 # This is a bundle20 object, turn it into an unbundler.
359 359 # This little dance should be dropped eventually when the
360 360 # API is finally improved.
361 361 stream = util.chunkbuffer(ret.getchunks())
362 362 ret = bundle2.getunbundler(self.ui, stream)
363 363 return ret
364 364 except Exception as exc:
365 365 # If the exception contains output salvaged from a bundle2
366 366 # reply, we need to make sure it is printed before continuing
367 367 # to fail. So we build a bundle2 with such output and consume
368 368 # it directly.
369 369 #
370 370 # This is not very elegant but allows a "simple" solution for
371 371 # issue4594
372 372 output = getattr(exc, '_bundle2salvagedoutput', ())
373 373 if output:
374 374 bundler = bundle2.bundle20(self._repo.ui)
375 375 for out in output:
376 376 bundler.addpart(out)
377 377 stream = util.chunkbuffer(bundler.getchunks())
378 378 b = bundle2.getunbundler(self.ui, stream)
379 379 bundle2.processbundle(self._repo, b)
380 380 raise
381 381 except error.PushRaced as exc:
382 382 raise error.ResponseError(
383 383 _(b'push failed:'), stringutil.forcebytestr(exc)
384 384 )
385 385
386 386 # End of _basewirecommands interface.
387 387
388 388 # Begin of peer interface.
389 389
390 390 def commandexecutor(self):
391 391 return localcommandexecutor(self)
392 392
393 393 # End of peer interface.
394 394
395 395
396 396 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 397 class locallegacypeer(localpeer):
398 398 '''peer extension which implements legacy methods too; used for tests with
399 399 restricted capabilities'''
400 400
401 401 def __init__(self, repo):
402 402 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403 403
404 404 # Begin of baselegacywirecommands interface.
405 405
406 406 def between(self, pairs):
407 407 return self._repo.between(pairs)
408 408
409 409 def branches(self, nodes):
410 410 return self._repo.branches(nodes)
411 411
412 412 def changegroup(self, nodes, source):
413 413 outgoing = discovery.outgoing(
414 414 self._repo, missingroots=nodes, missingheads=self._repo.heads()
415 415 )
416 416 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417 417
418 418 def changegroupsubset(self, bases, heads, source):
419 419 outgoing = discovery.outgoing(
420 420 self._repo, missingroots=bases, missingheads=heads
421 421 )
422 422 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423 423
424 424 # End of baselegacywirecommands interface.
425 425
426 426
427 427 # Increment the sub-version when the revlog v2 format changes to lock out old
428 428 # clients.
429 429 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430 430
431 431 # A repository with the sparserevlog feature will have delta chains that
432 432 # can spread over a larger span. Sparse reading cuts these large spans into
433 433 # pieces, so that each piece isn't too big.
434 434 # Without the sparserevlog capability, reading from the repository could use
435 435 # huge amounts of memory, because the whole span would be read at once,
436 436 # including all the intermediate revisions that aren't pertinent for the chain.
437 437 # This is why once a repository has enabled sparse-read, it becomes required.
438 438 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439 439
440 440 # A repository with the sidedataflag requirement allows storing extra
441 441 # information for revisions without altering their original hashes.
442 442 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443 443
444 444 # A repository with the copies-sidedata-changeset requirement will store
445 445 # copies-related information in the changeset's sidedata.
446 446 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447 447
448 448 # Functions receiving (ui, features) that extensions can register to impact
449 449 # the ability to load repositories with custom requirements. Only
450 450 # functions defined in loaded extensions are called.
451 451 #
452 452 # The function receives a set of requirement strings that the repository
453 453 # is capable of opening. Functions will typically add elements to the
454 454 # set to reflect that the extension knows how to handle those requirements.
455 455 featuresetupfuncs = set()
456 456
457 457
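# Hypothetical sketch of a feature-setup function (deliberately not
# registered here). An extension would define something like this in its own
# module and register it via ``featuresetupfuncs.add(...)`` so repositories
# carrying the invented requirement below could still be opened.
def _example_featuresetup(ui, supported):
    """Mark an invented requirement as supported (illustration only)."""
    supported.add(b'exp-example-feature')

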
458 458 def makelocalrepository(baseui, path, intents=None):
459 459 """Create a local repository object.
460 460
461 461 Given arguments needed to construct a local repository, this function
462 462 performs various early repository loading functionality (such as
463 463 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
464 464 the repository can be opened, derives a type suitable for representing
465 465 that repository, and returns an instance of it.
466 466
467 467 The returned object conforms to the ``repository.completelocalrepository``
468 468 interface.
469 469
470 470 The repository type is derived by calling a series of factory functions
471 471 for each aspect/interface of the final repository. These are defined by
472 472 ``REPO_INTERFACES``.
473 473
474 474 Each factory function is called to produce a type implementing a specific
475 475 interface. The cumulative list of returned types will be combined into a
476 476 new type and that type will be instantiated to represent the local
477 477 repository.
478 478
479 479 The factory functions each receive various state that may be consulted
480 480 as part of deriving a type.
481 481
482 482 Extensions should wrap these factory functions to customize repository type
483 483 creation. Note that an extension's wrapped function may be called even if
484 484 that extension is not loaded for the repo being constructed. Extensions
485 485 should check if their ``__name__`` appears in the
486 486 ``extensionmodulenames`` set passed to the factory function and no-op if
487 487 not.
488 488 """
489 489 ui = baseui.copy()
490 490 # Prevent copying repo configuration.
491 491 ui.copy = baseui.copy
492 492
493 493 # Working directory VFS rooted at repository root.
494 494 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
495 495
496 496 # Main VFS for .hg/ directory.
497 497 hgpath = wdirvfs.join(b'.hg')
498 498 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
499 499
500 500 # The .hg/ path should exist and should be a directory. All other
501 501 # cases are errors.
502 502 if not hgvfs.isdir():
503 503 try:
504 504 hgvfs.stat()
505 505 except OSError as e:
506 506 if e.errno != errno.ENOENT:
507 507 raise
508 508
509 509 raise error.RepoError(_(b'repository %s not found') % path)
510 510
511 511 # .hg/requires file contains a newline-delimited list of
512 512 # features/capabilities the opener (us) must have in order to use
513 513 # the repository. This file was introduced in Mercurial 0.9.2,
514 514 # which means very old repositories may not have one. We assume
515 515 # a missing file translates to no requirements.
516 516 try:
517 517 requirements = set(hgvfs.read(b'requires').splitlines())
518 518 except IOError as e:
519 519 if e.errno != errno.ENOENT:
520 520 raise
521 521 requirements = set()
522 522
523 523 # The .hg/hgrc file may load extensions or contain config options
524 524 # that influence repository construction. Attempt to load it and
525 525 # process any new extensions that it may have pulled in.
526 526 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
527 527 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
528 528 extensions.loadall(ui)
529 529 extensions.populateui(ui)
530 530
531 531 # Set of module names of extensions loaded for this repository.
532 532 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
533 533
534 534 supportedrequirements = gathersupportedrequirements(ui)
535 535
536 536 # We first validate the requirements are known.
537 537 ensurerequirementsrecognized(requirements, supportedrequirements)
538 538
539 539 # Then we validate that the known set is reasonable to use together.
540 540 ensurerequirementscompatible(ui, requirements)
541 541
542 542 # TODO there are unhandled edge cases related to opening repositories with
543 543 # shared storage. If storage is shared, we should also test for requirements
544 544 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
545 545 # that repo, as that repo may load extensions needed to open it. This is a
546 546 # bit complicated because we don't want the other hgrc to overwrite settings
547 547 # in this hgrc.
548 548 #
549 549 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
550 550 # file when sharing repos. But if a requirement is added after the share is
551 551 # performed, thereby introducing a new requirement for the opener, we may
552 552 # not see that and could encounter a run-time error interacting with
553 553 # that shared store since it has an unknown-to-us requirement.
554 554
555 555 # At this point, we know we should be capable of opening the repository.
556 556 # Now get on with doing that.
557 557
558 558 features = set()
559 559
560 560 # The "store" part of the repository holds versioned data. How it is
561 561 # accessed is determined by various requirements. The ``shared`` or
562 562 # ``relshared`` requirements indicate the store lives in the path contained
563 563 # in the ``.hg/sharedpath`` file. This is an absolute path for
564 564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
565 565 if b'shared' in requirements or b'relshared' in requirements:
566 566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
567 567 if b'relshared' in requirements:
568 568 sharedpath = hgvfs.join(sharedpath)
569 569
570 570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
571 571
572 572 if not sharedvfs.exists():
573 573 raise error.RepoError(
574 574 _(b'.hg/sharedpath points to nonexistent directory %s')
575 575 % sharedvfs.base
576 576 )
577 577
578 578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
579 579
580 580 storebasepath = sharedvfs.base
581 581 cachepath = sharedvfs.join(b'cache')
582 582 else:
583 583 storebasepath = hgvfs.base
584 584 cachepath = hgvfs.join(b'cache')
585 585 wcachepath = hgvfs.join(b'wcache')
586 586
587 587 # The store has changed over time and the exact layout is dictated by
588 588 # requirements. The store interface abstracts differences across all
589 589 # of them.
590 590 store = makestore(
591 591 requirements,
592 592 storebasepath,
593 593 lambda base: vfsmod.vfs(base, cacheaudited=True),
594 594 )
595 595 hgvfs.createmode = store.createmode
596 596
597 597 storevfs = store.vfs
598 598 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
599 599
600 600 # The cache vfs is used to manage cache files.
601 601 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
602 602 cachevfs.createmode = store.createmode
603 603 # The cache vfs is used to manage cache files related to the working copy
604 604 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
605 605 wcachevfs.createmode = store.createmode
606 606
607 607 # Now resolve the type for the repository object. We do this by repeatedly
608 608 # calling a factory function to produce types for specific aspects of the
609 609 # repo's operation. The aggregate returned types are used as base classes
610 610 # for a dynamically-derived type, which will represent our new repository.
611 611
612 612 bases = []
613 613 extrastate = {}
614 614
615 615 for iface, fn in REPO_INTERFACES:
616 616 # We pass all potentially useful state to give extensions tons of
617 617 # flexibility.
618 618 typ = fn()(
619 619 ui=ui,
620 620 intents=intents,
621 621 requirements=requirements,
622 622 features=features,
623 623 wdirvfs=wdirvfs,
624 624 hgvfs=hgvfs,
625 625 store=store,
626 626 storevfs=storevfs,
627 627 storeoptions=storevfs.options,
628 628 cachevfs=cachevfs,
629 629 wcachevfs=wcachevfs,
630 630 extensionmodulenames=extensionmodulenames,
631 631 extrastate=extrastate,
632 632 baseclasses=bases,
633 633 )
634 634
635 635 if not isinstance(typ, type):
636 636 raise error.ProgrammingError(
637 637 b'unable to construct type for %s' % iface
638 638 )
639 639
640 640 bases.append(typ)
641 641
642 642 # type() allows you to use characters in type names that wouldn't be
643 643 # recognized as Python symbols in source code. We abuse that to add
644 644 # rich information about our constructed repo.
645 645 name = pycompat.sysstr(
646 646 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
647 647 )
648 648
649 649 cls = type(name, tuple(bases), {})
650 650
651 651 return cls(
652 652 baseui=baseui,
653 653 ui=ui,
654 654 origroot=path,
655 655 wdirvfs=wdirvfs,
656 656 hgvfs=hgvfs,
657 657 requirements=requirements,
658 658 supportedrequirements=supportedrequirements,
659 659 sharedpath=storebasepath,
660 660 store=store,
661 661 cachevfs=cachevfs,
662 662 wcachevfs=wcachevfs,
663 663 features=features,
664 664 intents=intents,
665 665 )
666 666
667 667
668 668 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
669 669 """Load hgrc files/content into a ui instance.
670 670
671 671 This is called during repository opening to load any additional
672 672 config files or settings relevant to the current repository.
673 673
674 674 Returns a bool indicating whether any additional configs were loaded.
675 675
676 676 Extensions should monkeypatch this function to modify how per-repo
677 677 configs are loaded. For example, an extension may wish to pull in
678 678 configs from alternate files or sources.
679 679 """
680 680 if not rcutil.use_repo_hgrc():
681 681 return False
682 682 try:
683 683 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
684 684 return True
685 685 except IOError:
686 686 return False
687 687
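# Hypothetical wrapper for ``loadhgrc``, shown as a sketch only. An extension
# would install it from its ``uisetup`` with
# ``extensions.wrapfunction(localrepo, 'loadhgrc', _example_loadhgrc)``; the
# extra file name below is made up.
def _example_loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
    """Also read an extension-specific per-repo config file (sketch)."""
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        ui.readconfig(hgvfs.join(b'example-hgrc'), root=wdirvfs.base)
        loaded = True
    except IOError:
        pass
    return loaded
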
688 688
689 689 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
690 690 """Perform additional actions after .hg/hgrc is loaded.
691 691
692 692 This function is called during repository loading immediately after
693 693 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
694 694
695 695 The function can be used to validate configs, automatically add
696 696 options (including extensions) based on requirements, etc.
697 697 """
698 698
699 699 # Map of requirements to list of extensions to load automatically when
700 700 # requirement is present.
701 701 autoextensions = {
702 702 b'largefiles': [b'largefiles'],
703 703 b'lfs': [b'lfs'],
704 704 }
705 705
706 706 for requirement, names in sorted(autoextensions.items()):
707 707 if requirement not in requirements:
708 708 continue
709 709
710 710 for name in names:
711 711 if not ui.hasconfig(b'extensions', name):
712 712 ui.setconfig(b'extensions', name, b'', source=b'autoload')
713 713
714 714
715 715 def gathersupportedrequirements(ui):
716 716 """Determine the complete set of recognized requirements."""
717 717 # Start with all requirements supported by this file.
718 718 supported = set(localrepository._basesupported)
719 719
720 720 # Execute ``featuresetupfuncs`` entries if they belong to an extension
721 721 # relevant to this ui instance.
722 722 modules = {m.__name__ for n, m in extensions.extensions(ui)}
723 723
724 724 for fn in featuresetupfuncs:
725 725 if fn.__module__ in modules:
726 726 fn(ui, supported)
727 727
728 728 # Add derived requirements from registered compression engines.
729 729 for name in util.compengines:
730 730 engine = util.compengines[name]
731 731 if engine.available() and engine.revlogheader():
732 732 supported.add(b'exp-compression-%s' % name)
733 733 if engine.name() == b'zstd':
734 734 supported.add(b'revlog-compression-zstd')
735 735
736 736 return supported
737 737
738 738
739 739 def ensurerequirementsrecognized(requirements, supported):
740 740 """Validate that a set of local requirements is recognized.
741 741
742 742 Receives a set of requirements. Raises an ``error.RepoError`` if there
743 743 exists any requirement in that set that currently loaded code doesn't
744 744 recognize.
745 745
746 746 Returns ``None`` on success.
747 747 """
748 748 missing = set()
749 749
750 750 for requirement in requirements:
751 751 if requirement in supported:
752 752 continue
753 753
754 754 if not requirement or not requirement[0:1].isalnum():
755 755 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
756 756
757 757 missing.add(requirement)
758 758
759 759 if missing:
760 760 raise error.RequirementError(
761 761 _(b'repository requires features unknown to this Mercurial: %s')
762 762 % b' '.join(sorted(missing)),
763 763 hint=_(
764 764 b'see https://mercurial-scm.org/wiki/MissingRequirement '
765 765 b'for more information'
766 766 ),
767 767 )
768 768
769 769
770 770 def ensurerequirementscompatible(ui, requirements):
771 771 """Validates that a set of recognized requirements is mutually compatible.
772 772
773 773 Some requirements may not be compatible with others or require
774 774 config options that aren't enabled. This function is called during
775 775 repository opening to ensure that the set of requirements needed
776 776 to open a repository is sane and compatible with config options.
777 777
778 778 Extensions can monkeypatch this function to perform additional
779 779 checking.
780 780
781 781 ``error.RepoError`` should be raised on failure.
782 782 """
783 783 if b'exp-sparse' in requirements and not sparse.enabled:
784 784 raise error.RepoError(
785 785 _(
786 786 b'repository is using sparse feature but '
787 787 b'sparse is not enabled; enable the '
788 788 b'"sparse" extensions to access'
789 789 )
790 790 )
791 791
792 792
793 793 def makestore(requirements, path, vfstype):
794 794 """Construct a storage object for a repository."""
795 795 if b'store' in requirements:
796 796 if b'fncache' in requirements:
797 797 return storemod.fncachestore(
798 798 path, vfstype, b'dotencode' in requirements
799 799 )
800 800
801 801 return storemod.encodedstore(path, vfstype)
802 802
803 803 return storemod.basicstore(path, vfstype)
804 804
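# For reference, the layout selection above resolves as follows (descriptive
# summary only):
#   'store' + 'fncache' -> fncachestore ('dotencode' selects the newer
#                          filename encoding)
#   'store' only        -> encodedstore
#   neither             -> basicstore (the legacy flat layout)
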
805 805
806 806 def resolvestorevfsoptions(ui, requirements, features):
807 807 """Resolve the options to pass to the store vfs opener.
808 808
809 809 The returned dict is used to influence behavior of the storage layer.
810 810 """
811 811 options = {}
812 812
813 813 if b'treemanifest' in requirements:
814 814 options[b'treemanifest'] = True
815 815
816 816 # experimental config: format.manifestcachesize
817 817 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
818 818 if manifestcachesize is not None:
819 819 options[b'manifestcachesize'] = manifestcachesize
820 820
821 821 # In the absence of another requirement superseding a revlog-related
822 822 # requirement, we have to assume the repo is using revlog version 0.
823 823 # This revlog format is super old and we don't bother trying to parse
824 824 # opener options for it because those options wouldn't do anything
825 825 # meaningful on such old repos.
826 826 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
827 827 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
828 828 else: # explicitly mark repo as using revlogv0
829 829 options[b'revlogv0'] = True
830 830
831 831 if COPIESSDC_REQUIREMENT in requirements:
832 832 options[b'copies-storage'] = b'changeset-sidedata'
833 833 else:
834 834 writecopiesto = ui.config(b'experimental', b'copies.write-to')
835 835 copiesextramode = (b'changeset-only', b'compatibility')
836 836 if writecopiesto in copiesextramode:
837 837 options[b'copies-storage'] = b'extra'
838 838
839 839 return options
840 840
841 841
842 842 def resolverevlogstorevfsoptions(ui, requirements, features):
843 843 """Resolve opener options specific to revlogs."""
844 844
845 845 options = {}
846 846 options[b'flagprocessors'] = {}
847 847
848 848 if b'revlogv1' in requirements:
849 849 options[b'revlogv1'] = True
850 850 if REVLOGV2_REQUIREMENT in requirements:
851 851 options[b'revlogv2'] = True
852 852
853 853 if b'generaldelta' in requirements:
854 854 options[b'generaldelta'] = True
855 855
856 856 # experimental config: format.chunkcachesize
857 857 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
858 858 if chunkcachesize is not None:
859 859 options[b'chunkcachesize'] = chunkcachesize
860 860
861 861 deltabothparents = ui.configbool(
862 862 b'storage', b'revlog.optimize-delta-parent-choice'
863 863 )
864 864 options[b'deltabothparents'] = deltabothparents
865 865
866 866 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
867 867 lazydeltabase = False
868 868 if lazydelta:
869 869 lazydeltabase = ui.configbool(
870 870 b'storage', b'revlog.reuse-external-delta-parent'
871 871 )
872 872 if lazydeltabase is None:
873 873 lazydeltabase = not scmutil.gddeltaconfig(ui)
874 874 options[b'lazydelta'] = lazydelta
875 875 options[b'lazydeltabase'] = lazydeltabase
876 876
877 877 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
878 878 if 0 <= chainspan:
879 879 options[b'maxdeltachainspan'] = chainspan
880 880
881 881 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
882 882 if mmapindexthreshold is not None:
883 883 options[b'mmapindexthreshold'] = mmapindexthreshold
884 884
885 885 withsparseread = ui.configbool(b'experimental', b'sparse-read')
886 886 srdensitythres = float(
887 887 ui.config(b'experimental', b'sparse-read.density-threshold')
888 888 )
889 889 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
890 890 options[b'with-sparse-read'] = withsparseread
891 891 options[b'sparse-read-density-threshold'] = srdensitythres
892 892 options[b'sparse-read-min-gap-size'] = srmingapsize
893 893
894 894 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
895 895 options[b'sparse-revlog'] = sparserevlog
896 896 if sparserevlog:
897 897 options[b'generaldelta'] = True
898 898
899 899 sidedata = SIDEDATA_REQUIREMENT in requirements
900 900 options[b'side-data'] = sidedata
901 901
902 902 maxchainlen = None
903 903 if sparserevlog:
904 904 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
905 905 # experimental config: format.maxchainlen
906 906 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
907 907 if maxchainlen is not None:
908 908 options[b'maxchainlen'] = maxchainlen
909 909
910 910 for r in requirements:
911 911 # we allow multiple compression engine requirements to co-exist because,
912 912 # strictly speaking, revlogs seem to support mixed compression styles.
913 913 #
914 914 # The compression used for new entries will be "the last one"
915 915 prefix = r.startswith
916 916 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
917 917 options[b'compengine'] = r.split(b'-', 2)[2]
918 918
919 919 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
920 920 if options[b'zlib.level'] is not None:
921 921 if not (0 <= options[b'zlib.level'] <= 9):
922 922 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
923 923 raise error.Abort(msg % options[b'zlib.level'])
924 924 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
925 925 if options[b'zstd.level'] is not None:
926 926 if not (0 <= options[b'zstd.level'] <= 22):
927 927 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
928 928 raise error.Abort(msg % options[b'zstd.level'])
929 929
930 930 if repository.NARROW_REQUIREMENT in requirements:
931 931 options[b'enableellipsis'] = True
932 932
933 933 if ui.configbool(b'experimental', b'rust.index'):
934 934 options[b'rust.index'] = True
935 935 if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
936 936 options[b'exp-persistent-nodemap'] = True
937 if ui.configbool(b'devel', b'persistent-nodemap'):
938 options[b'devel-force-nodemap'] = True
937 939
938 940 return options
939 941
940 942
941 943 def makemain(**kwargs):
942 944 """Produce a type conforming to ``ilocalrepositorymain``."""
943 945 return localrepository
944 946
945 947
946 948 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
947 949 class revlogfilestorage(object):
948 950 """File storage when using revlogs."""
949 951
950 952 def file(self, path):
951 953 if path[0] == b'/':
952 954 path = path[1:]
953 955
954 956 return filelog.filelog(self.svfs, path)
955 957
956 958
957 959 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
958 960 class revlognarrowfilestorage(object):
959 961 """File storage when using revlogs and narrow files."""
960 962
961 963 def file(self, path):
962 964 if path[0] == b'/':
963 965 path = path[1:]
964 966
965 967 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
966 968
967 969
968 970 def makefilestorage(requirements, features, **kwargs):
969 971 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
970 972 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
971 973 features.add(repository.REPO_FEATURE_STREAM_CLONE)
972 974
973 975 if repository.NARROW_REQUIREMENT in requirements:
974 976 return revlognarrowfilestorage
975 977 else:
976 978 return revlogfilestorage
977 979
978 980
979 981 # List of repository interfaces and factory functions for them. Each
980 982 # will be called in order during ``makelocalrepository()`` to iteratively
981 983 # derive the final type for a local repository instance. We capture the
982 984 # function as a lambda so we don't hold a reference and the module-level
983 985 # functions can be wrapped.
984 986 REPO_INTERFACES = [
985 987 (repository.ilocalrepositorymain, lambda: makemain),
986 988 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
987 989 ]
988 990
989 991
990 992 @interfaceutil.implementer(repository.ilocalrepositorymain)
991 993 class localrepository(object):
992 994 """Main class for representing local repositories.
993 995
994 996 All local repositories are instances of this class.
995 997
996 998 Constructed on its own, instances of this class are not usable as
997 999 repository objects. To obtain a usable repository object, call
998 1000 ``hg.repository()``, ``localrepo.instance()``, or
999 1001 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1000 1002 ``instance()`` adds support for creating new repositories.
1001 1003 ``hg.repository()`` adds more extension integration, including calling
1002 1004 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1003 1005 used.
1004 1006 """
1005 1007
1006 1008 # obsolete experimental requirements:
1007 1009 # - manifestv2: An experimental new manifest format that allowed
1008 1010 # for stem compression of long paths. Experiment ended up not
1009 1011 # being successful (repository sizes went up due to worse delta
1010 1012 # chains), and the code was deleted in 4.6.
1011 1013 supportedformats = {
1012 1014 b'revlogv1',
1013 1015 b'generaldelta',
1014 1016 b'treemanifest',
1015 1017 COPIESSDC_REQUIREMENT,
1016 1018 REVLOGV2_REQUIREMENT,
1017 1019 SIDEDATA_REQUIREMENT,
1018 1020 SPARSEREVLOG_REQUIREMENT,
1019 1021 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1020 1022 }
1021 1023 _basesupported = supportedformats | {
1022 1024 b'store',
1023 1025 b'fncache',
1024 1026 b'shared',
1025 1027 b'relshared',
1026 1028 b'dotencode',
1027 1029 b'exp-sparse',
1028 1030 b'internal-phase',
1029 1031 }
1030 1032
1031 1033 # list of prefixes for files which can be written without 'wlock'
1032 1034 # Extensions should extend this list when needed
1033 1035 _wlockfreeprefix = {
1034 1036 # We might consider requiring 'wlock' for the next
1035 1037 # two, but pretty much all the existing code assumes
1036 1038 # wlock is not needed so we keep them excluded for
1037 1039 # now.
1038 1040 b'hgrc',
1039 1041 b'requires',
1040 1042 # XXX cache is a complicated business; someone
1041 1043 # should investigate this in depth at some point
1042 1044 b'cache/',
1043 1045 # XXX shouldn't be dirstate covered by the wlock?
1044 1046 b'dirstate',
1045 1047 # XXX bisect was still a bit too messy at the time
1046 1048 # this changeset was introduced. Someone should fix
1047 1049 # the remaining bit and drop this line
1048 1050 b'bisect.state',
1049 1051 }
1050 1052
1051 1053 def __init__(
1052 1054 self,
1053 1055 baseui,
1054 1056 ui,
1055 1057 origroot,
1056 1058 wdirvfs,
1057 1059 hgvfs,
1058 1060 requirements,
1059 1061 supportedrequirements,
1060 1062 sharedpath,
1061 1063 store,
1062 1064 cachevfs,
1063 1065 wcachevfs,
1064 1066 features,
1065 1067 intents=None,
1066 1068 ):
1067 1069 """Create a new local repository instance.
1068 1070
1069 1071 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1070 1072 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1071 1073 object.
1072 1074
1073 1075 Arguments:
1074 1076
1075 1077 baseui
1076 1078 ``ui.ui`` instance that ``ui`` argument was based off of.
1077 1079
1078 1080 ui
1079 1081 ``ui.ui`` instance for use by the repository.
1080 1082
1081 1083 origroot
1082 1084 ``bytes`` path to working directory root of this repository.
1083 1085
1084 1086 wdirvfs
1085 1087 ``vfs.vfs`` rooted at the working directory.
1086 1088
1087 1089 hgvfs
1088 1090 ``vfs.vfs`` rooted at .hg/
1089 1091
1090 1092 requirements
1091 1093 ``set`` of bytestrings representing repository opening requirements.
1092 1094
1093 1095 supportedrequirements
1094 1096 ``set`` of bytestrings representing repository requirements that we
1095 1097 know how to open. May be a superset of ``requirements``.
1096 1098
1097 1099 sharedpath
1098 1100 ``bytes`` defining the path to the storage base directory. Points to a
1099 1101 ``.hg/`` directory somewhere.
1100 1102
1101 1103 store
1102 1104 ``store.basicstore`` (or derived) instance providing access to
1103 1105 versioned storage.
1104 1106
1105 1107 cachevfs
1106 1108 ``vfs.vfs`` used for cache files.
1107 1109
1108 1110 wcachevfs
1109 1111 ``vfs.vfs`` used for cache files related to the working copy.
1110 1112
1111 1113 features
1112 1114 ``set`` of bytestrings defining features/capabilities of this
1113 1115 instance.
1114 1116
1115 1117 intents
1116 1118 ``set`` of system strings indicating what this repo will be used
1117 1119 for.
1118 1120 """
1119 1121 self.baseui = baseui
1120 1122 self.ui = ui
1121 1123 self.origroot = origroot
1122 1124 # vfs rooted at working directory.
1123 1125 self.wvfs = wdirvfs
1124 1126 self.root = wdirvfs.base
1125 1127 # vfs rooted at .hg/. Used to access most non-store paths.
1126 1128 self.vfs = hgvfs
1127 1129 self.path = hgvfs.base
1128 1130 self.requirements = requirements
1129 1131 self.supported = supportedrequirements
1130 1132 self.sharedpath = sharedpath
1131 1133 self.store = store
1132 1134 self.cachevfs = cachevfs
1133 1135 self.wcachevfs = wcachevfs
1134 1136 self.features = features
1135 1137
1136 1138 self.filtername = None
1137 1139
1138 1140 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1139 1141 b'devel', b'check-locks'
1140 1142 ):
1141 1143 self.vfs.audit = self._getvfsward(self.vfs.audit)
1142 1144 # A list of callbacks to shape the phase if no data were found.
1143 1145 # Callbacks are in the form: func(repo, roots) --> processed root.
1144 1146 # This list is to be filled by extensions during repo setup
1145 1147 self._phasedefaults = []
1146 1148
1147 1149 color.setup(self.ui)
1148 1150
1149 1151 self.spath = self.store.path
1150 1152 self.svfs = self.store.vfs
1151 1153 self.sjoin = self.store.join
1152 1154 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1153 1155 b'devel', b'check-locks'
1154 1156 ):
1155 1157 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1156 1158 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1157 1159 else: # standard vfs
1158 1160 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1159 1161
1160 1162 self._dirstatevalidatewarned = False
1161 1163
1162 1164 self._branchcaches = branchmap.BranchMapCache()
1163 1165 self._revbranchcache = None
1164 1166 self._filterpats = {}
1165 1167 self._datafilters = {}
1166 1168 self._transref = self._lockref = self._wlockref = None
1167 1169
1168 1170 # A cache for various files under .hg/ that tracks file changes,
1169 1171 # (used by the filecache decorator)
1170 1172 #
1171 1173 # Maps a property name to its util.filecacheentry
1172 1174 self._filecache = {}
1173 1175
1174 1176 # hold sets of revisions to be filtered
1175 1177 # should be cleared when something might have changed the filter value:
1176 1178 # - new changesets,
1177 1179 # - phase change,
1178 1180 # - new obsolescence marker,
1179 1181 # - working directory parent change,
1180 1182 # - bookmark changes
1181 1183 self.filteredrevcache = {}
1182 1184
1183 1185 # post-dirstate-status hooks
1184 1186 self._postdsstatus = []
1185 1187
1186 1188 # generic mapping between names and nodes
1187 1189 self.names = namespaces.namespaces()
1188 1190
1189 1191 # Key to signature value.
1190 1192 self._sparsesignaturecache = {}
1191 1193 # Signature to cached matcher instance.
1192 1194 self._sparsematchercache = {}
1193 1195
1194 1196 self._extrafilterid = repoview.extrafilter(ui)
1195 1197
1196 1198 self.filecopiesmode = None
1197 1199 if COPIESSDC_REQUIREMENT in self.requirements:
1198 1200 self.filecopiesmode = b'changeset-sidedata'
1199 1201
1200 1202 def _getvfsward(self, origfunc):
1201 1203 """build a ward for self.vfs"""
1202 1204 rref = weakref.ref(self)
1203 1205
1204 1206 def checkvfs(path, mode=None):
1205 1207 ret = origfunc(path, mode=mode)
1206 1208 repo = rref()
1207 1209 if (
1208 1210 repo is None
1209 1211 or not util.safehasattr(repo, b'_wlockref')
1210 1212 or not util.safehasattr(repo, b'_lockref')
1211 1213 ):
1212 1214 return
1213 1215 if mode in (None, b'r', b'rb'):
1214 1216 return
1215 1217 if path.startswith(repo.path):
1216 1218 # truncate name relative to the repository (.hg)
1217 1219 path = path[len(repo.path) + 1 :]
1218 1220 if path.startswith(b'cache/'):
1219 1221 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1220 1222 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1221 1223 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1222 1224 # journal is covered by 'lock'
1223 1225 if repo._currentlock(repo._lockref) is None:
1224 1226 repo.ui.develwarn(
1225 1227 b'write with no lock: "%s"' % path,
1226 1228 stacklevel=3,
1227 1229 config=b'check-locks',
1228 1230 )
1229 1231 elif repo._currentlock(repo._wlockref) is None:
1230 1232 # rest of vfs files are covered by 'wlock'
1231 1233 #
1232 1234 # exclude special files
1233 1235 for prefix in self._wlockfreeprefix:
1234 1236 if path.startswith(prefix):
1235 1237 return
1236 1238 repo.ui.develwarn(
1237 1239 b'write with no wlock: "%s"' % path,
1238 1240 stacklevel=3,
1239 1241 config=b'check-locks',
1240 1242 )
1241 1243 return ret
1242 1244
1243 1245 return checkvfs
1244 1246
1245 1247 def _getsvfsward(self, origfunc):
1246 1248 """build a ward for self.svfs"""
1247 1249 rref = weakref.ref(self)
1248 1250
1249 1251 def checksvfs(path, mode=None):
1250 1252 ret = origfunc(path, mode=mode)
1251 1253 repo = rref()
1252 1254 if repo is None or not util.safehasattr(repo, b'_lockref'):
1253 1255 return
1254 1256 if mode in (None, b'r', b'rb'):
1255 1257 return
1256 1258 if path.startswith(repo.sharedpath):
1257 1259 # truncate name relative to the repository (.hg)
1258 1260 path = path[len(repo.sharedpath) + 1 :]
1259 1261 if repo._currentlock(repo._lockref) is None:
1260 1262 repo.ui.develwarn(
1261 1263 b'write with no lock: "%s"' % path, stacklevel=4
1262 1264 )
1263 1265 return ret
1264 1266
1265 1267 return checksvfs
1266 1268
1267 1269 def close(self):
1268 1270 self._writecaches()
1269 1271
1270 1272 def _writecaches(self):
1271 1273 if self._revbranchcache:
1272 1274 self._revbranchcache.write()
1273 1275
1274 1276 def _restrictcapabilities(self, caps):
1275 1277 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1276 1278 caps = set(caps)
1277 1279 capsblob = bundle2.encodecaps(
1278 1280 bundle2.getrepocaps(self, role=b'client')
1279 1281 )
1280 1282 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1281 1283 return caps
1282 1284
1283 1285 def _writerequirements(self):
1284 1286 scmutil.writerequires(self.vfs, self.requirements)
1285 1287
1286 1288 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1287 1289 # self -> auditor -> self._checknested -> self
1288 1290
1289 1291 @property
1290 1292 def auditor(self):
1291 1293 # This is only used by context.workingctx.match in order to
1292 1294 # detect files in subrepos.
1293 1295 return pathutil.pathauditor(self.root, callback=self._checknested)
1294 1296
1295 1297 @property
1296 1298 def nofsauditor(self):
1297 1299 # This is only used by context.basectx.match in order to detect
1298 1300 # files in subrepos.
1299 1301 return pathutil.pathauditor(
1300 1302 self.root, callback=self._checknested, realfs=False, cached=True
1301 1303 )
1302 1304
1303 1305 def _checknested(self, path):
1304 1306 """Determine if path is a legal nested repository."""
1305 1307 if not path.startswith(self.root):
1306 1308 return False
1307 1309 subpath = path[len(self.root) + 1 :]
1308 1310 normsubpath = util.pconvert(subpath)
1309 1311
1310 1312 # XXX: Checking against the current working copy is wrong in
1311 1313 # the sense that it can reject things like
1312 1314 #
1313 1315 # $ hg cat -r 10 sub/x.txt
1314 1316 #
1315 1317 # if sub/ is no longer a subrepository in the working copy
1316 1318 # parent revision.
1317 1319 #
1318 1320 # However, it can of course also allow things that would have
1319 1321 # been rejected before, such as the above cat command if sub/
1320 1322 # is a subrepository now, but was a normal directory before.
1321 1323 # The old path auditor would have rejected by mistake since it
1322 1324 # panics when it sees sub/.hg/.
1323 1325 #
1324 1326 # All in all, checking against the working copy seems sensible
1325 1327 # since we want to prevent access to nested repositories on
1326 1328 # the filesystem *now*.
1327 1329 ctx = self[None]
1328 1330 parts = util.splitpath(subpath)
1329 1331 while parts:
1330 1332 prefix = b'/'.join(parts)
1331 1333 if prefix in ctx.substate:
1332 1334 if prefix == normsubpath:
1333 1335 return True
1334 1336 else:
1335 1337 sub = ctx.sub(prefix)
1336 1338 return sub.checknested(subpath[len(prefix) + 1 :])
1337 1339 else:
1338 1340 parts.pop()
1339 1341 return False
1340 1342
1341 1343 def peer(self):
1342 1344 return localpeer(self) # not cached to avoid reference cycle
1343 1345
1344 1346 def unfiltered(self):
1345 1347 """Return unfiltered version of the repository
1346 1348
1347 1349 Intended to be overwritten by filtered repo."""
1348 1350 return self
1349 1351
1350 1352 def filtered(self, name, visibilityexceptions=None):
1351 1353 """Return a filtered version of a repository
1352 1354
1353 1355 The `name` parameter is the identifier of the requested view. This
1354 1356 will return a repoview object set "exactly" to the specified view.
1355 1357
1356 1358 This function does not apply recursive filtering to a repository. For
1357 1359 example calling `repo.filtered("served")` will return a repoview using
1358 1360 the "served" view, regardless of the initial view used by `repo`.
1359 1361
1360 1362 In other words, there is always only one level of `repoview` "filtering".
1361 1363 """
1362 1364 if self._extrafilterid is not None and b'%' not in name:
1363 1365 name = name + b'%' + self._extrafilterid
1364 1366
1365 1367 cls = repoview.newtype(self.unfiltered().__class__)
1366 1368 return cls(self, name, visibilityexceptions)
1367 1369
1368 1370 @mixedrepostorecache(
1369 1371 (b'bookmarks', b'plain'),
1370 1372 (b'bookmarks.current', b'plain'),
1371 1373 (b'bookmarks', b''),
1372 1374 (b'00changelog.i', b''),
1373 1375 )
1374 1376 def _bookmarks(self):
1375 1377 # Since the multiple files involved in the transaction cannot be
1376 1378 # written atomically (with the current repository format), there is a race
1377 1379 # condition here.
1378 1380 #
1379 1381 # 1) changelog content A is read
1380 1382 # 2) outside transaction update changelog to content B
1381 1383 # 3) outside transaction update bookmark file referring to content B
1382 1384 # 4) bookmarks file content is read and filtered against changelog-A
1383 1385 #
1384 1386 # When this happens, bookmarks against nodes missing from A are dropped.
1385 1387 #
1386 1388 # Having this happen during a read is not great, but it becomes worse
1387 1389 # when it happens during a write, because the bookmarks to the "unknown"
1388 1390 # nodes will be dropped for good. However, writes happen within locks.
1389 1391 # This locking makes it possible to have a race free consistent read.
1390 1392 # For this purpose, data read from disk before locking is
1391 1393 # "invalidated" right after the locks are taken. These invalidations are
1392 1394 # "light": the `filecache` mechanism keeps the data in memory and will
1393 1395 # reuse it if the underlying files did not change. Not parsing the
1394 1396 # same data multiple times helps performance.
1395 1397 #
1396 1398 # Unfortunately, in the case described above, the files tracked by the
1397 1399 # bookmarks file cache might not have changed, but the in-memory
1398 1400 # content is still "wrong" because we used an older changelog content
1399 1401 # to process the on-disk data. So after locking, the changelog would be
1400 1402 # refreshed but `_bookmarks` would be preserved.
1401 1403 # Adding `00changelog.i` to the list of tracked files is not
1402 1404 # enough, because at the time we build the content for `_bookmarks` in
1403 1405 # (4), the changelog file has already diverged from the content used
1404 1406 # for loading `changelog` in (1)
1405 1407 #
1406 1408 # To prevent the issue, we force the changelog to be explicitly
1407 1409 # reloaded while computing `_bookmarks`. The data race can still happen
1408 1410 # without the lock (with a narrower window), but it would no longer go
1409 1411 # undetected during the lock time refresh.
1410 1412 #
1411 1413 # The new schedule is as follows:
1412 1414 #
1413 1415 # 1) filecache logic detect that `_bookmarks` needs to be computed
1414 1416 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1415 1417 # 3) We force `changelog` filecache to be tested
1416 1418 # 4) cachestat for `changelog` are captured (for changelog)
1417 1419 # 5) `_bookmarks` is computed and cached
1418 1420 #
1419 1421 # The step in (3) ensures we have a changelog at least as recent as the
1420 1422 # cache stat computed in (1). As a result, at locking time:
1421 1423 # * if the changelog did not change since (1) -> we can reuse the data
1422 1424 # * otherwise -> the bookmarks get refreshed.
1423 1425 self._refreshchangelog()
1424 1426 return bookmarks.bmstore(self)
1425 1427
1426 1428 def _refreshchangelog(self):
1427 1429 """make sure the in memory changelog match the on-disk one"""
1428 1430 if 'changelog' in vars(self) and self.currenttransaction() is None:
1429 1431 del self.changelog
1430 1432
1431 1433 @property
1432 1434 def _activebookmark(self):
1433 1435 return self._bookmarks.active
1434 1436
1435 1437 # _phasesets depend on changelog. What we need is to call
1436 1438 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1437 1439 # can't be easily expressed in the filecache mechanism.
1438 1440 @storecache(b'phaseroots', b'00changelog.i')
1439 1441 def _phasecache(self):
1440 1442 return phases.phasecache(self, self._phasedefaults)
1441 1443
1442 1444 @storecache(b'obsstore')
1443 1445 def obsstore(self):
1444 1446 return obsolete.makestore(self.ui, self)
1445 1447
1446 1448 @storecache(b'00changelog.i')
1447 1449 def changelog(self):
1448 1450 return self.store.changelog(txnutil.mayhavepending(self.root))
1449 1451
1450 1452 @storecache(b'00manifest.i')
1451 1453 def manifestlog(self):
1452 1454 return self.store.manifestlog(self, self._storenarrowmatch)
1453 1455
1454 1456 @repofilecache(b'dirstate')
1455 1457 def dirstate(self):
1456 1458 return self._makedirstate()
1457 1459
1458 1460 def _makedirstate(self):
1459 1461 """Extension point for wrapping the dirstate per-repo."""
1460 1462 sparsematchfn = lambda: sparse.matcher(self)
1461 1463
1462 1464 return dirstate.dirstate(
1463 1465 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1464 1466 )
1465 1467
1466 1468 def _dirstatevalidate(self, node):
1467 1469 try:
1468 1470 self.changelog.rev(node)
1469 1471 return node
1470 1472 except error.LookupError:
1471 1473 if not self._dirstatevalidatewarned:
1472 1474 self._dirstatevalidatewarned = True
1473 1475 self.ui.warn(
1474 1476 _(b"warning: ignoring unknown working parent %s!\n")
1475 1477 % short(node)
1476 1478 )
1477 1479 return nullid
1478 1480
1479 1481 @storecache(narrowspec.FILENAME)
1480 1482 def narrowpats(self):
1481 1483 """matcher patterns for this repository's narrowspec
1482 1484
1483 1485 A tuple of (includes, excludes).
1484 1486 """
1485 1487 return narrowspec.load(self)
1486 1488
1487 1489 @storecache(narrowspec.FILENAME)
1488 1490 def _storenarrowmatch(self):
1489 1491 if repository.NARROW_REQUIREMENT not in self.requirements:
1490 1492 return matchmod.always()
1491 1493 include, exclude = self.narrowpats
1492 1494 return narrowspec.match(self.root, include=include, exclude=exclude)
1493 1495
1494 1496 @storecache(narrowspec.FILENAME)
1495 1497 def _narrowmatch(self):
1496 1498 if repository.NARROW_REQUIREMENT not in self.requirements:
1497 1499 return matchmod.always()
1498 1500 narrowspec.checkworkingcopynarrowspec(self)
1499 1501 include, exclude = self.narrowpats
1500 1502 return narrowspec.match(self.root, include=include, exclude=exclude)
1501 1503
1502 1504 def narrowmatch(self, match=None, includeexact=False):
1503 1505 """matcher corresponding the the repo's narrowspec
1504 1506
1505 1507 If `match` is given, then that will be intersected with the narrow
1506 1508 matcher.
1507 1509
1508 1510 If `includeexact` is True, then any exact matches from `match` will
1509 1511 be included even if they're outside the narrowspec.
1510 1512 """
1511 1513 if match:
1512 1514 if includeexact and not self._narrowmatch.always():
1513 1515 # do not exclude explicitly-specified paths so that they can
1514 1516 # be warned later on
1515 1517 em = matchmod.exact(match.files())
1516 1518 nm = matchmod.unionmatcher([self._narrowmatch, em])
1517 1519 return matchmod.intersectmatchers(match, nm)
1518 1520 return matchmod.intersectmatchers(match, self._narrowmatch)
1519 1521 return self._narrowmatch
1520 1522
1521 1523 def setnarrowpats(self, newincludes, newexcludes):
1522 1524 narrowspec.save(self, newincludes, newexcludes)
1523 1525 self.invalidate(clearfilecache=True)
1524 1526
1525 1527 @unfilteredpropertycache
1526 1528 def _quick_access_changeid_null(self):
1527 1529 return {
1528 1530 b'null': (nullrev, nullid),
1529 1531 nullrev: (nullrev, nullid),
1530 1532 nullid: (nullrev, nullid),
1531 1533 }
1532 1534
1533 1535 @unfilteredpropertycache
1534 1536 def _quick_access_changeid_wc(self):
1535 1537 # also fast path access to the working copy parents
1535 1537 # however, only do it for filters that ensure the wc is visible.
1537 1539 quick = {}
1538 1540 cl = self.unfiltered().changelog
1539 1541 for node in self.dirstate.parents():
1540 1542 if node == nullid:
1541 1543 continue
1542 1544 rev = cl.index.get_rev(node)
1543 1545 if rev is None:
1544 1546 # unknown working copy parent case:
1545 1547 #
1546 1548 # skip the fast path and let higher code deal with it
1547 1549 continue
1548 1550 pair = (rev, node)
1549 1551 quick[rev] = pair
1550 1552 quick[node] = pair
1551 1553 # also add the parents of the parents
1552 1554 for r in cl.parentrevs(rev):
1553 1555 if r == nullrev:
1554 1556 continue
1555 1557 n = cl.node(r)
1556 1558 pair = (r, n)
1557 1559 quick[r] = pair
1558 1560 quick[n] = pair
1559 1561 p1node = self.dirstate.p1()
1560 1562 if p1node != nullid:
1561 1563 quick[b'.'] = quick[p1node]
1562 1564 return quick
1563 1565
1564 1566 @unfilteredmethod
1565 1567 def _quick_access_changeid_invalidate(self):
1566 1568 if '_quick_access_changeid_wc' in vars(self):
1567 1569 del self.__dict__['_quick_access_changeid_wc']
1568 1570
1569 1571 @property
1570 1572 def _quick_access_changeid(self):
1571 1573 """an helper dictionnary for __getitem__ calls
1572 1574
1573 1575 This contains a list of symbols we can recognize right away without
1574 1576 further processing.
1575 1577 """
1576 1578 mapping = self._quick_access_changeid_null
1577 1579 if self.filtername in repoview.filter_has_wc:
1578 1580 mapping = mapping.copy()
1579 1581 mapping.update(self._quick_access_changeid_wc)
1580 1582 return mapping
1581 1583
1582 1584 def __getitem__(self, changeid):
1583 1585 # dealing with special cases
1584 1586 if changeid is None:
1585 1587 return context.workingctx(self)
1586 1588 if isinstance(changeid, context.basectx):
1587 1589 return changeid
1588 1590
1589 1591 # dealing with multiple revisions
1590 1592 if isinstance(changeid, slice):
1591 1593 # wdirrev isn't contiguous so the slice shouldn't include it
1592 1594 return [
1593 1595 self[i]
1594 1596 for i in pycompat.xrange(*changeid.indices(len(self)))
1595 1597 if i not in self.changelog.filteredrevs
1596 1598 ]
1597 1599
1598 1600 # dealing with some special values
1599 1601 quick_access = self._quick_access_changeid.get(changeid)
1600 1602 if quick_access is not None:
1601 1603 rev, node = quick_access
1602 1604 return context.changectx(self, rev, node, maybe_filtered=False)
1603 1605 if changeid == b'tip':
1604 1606 node = self.changelog.tip()
1605 1607 rev = self.changelog.rev(node)
1606 1608 return context.changectx(self, rev, node)
1607 1609
1608 1610 # dealing with arbitrary values
1609 1611 try:
1610 1612 if isinstance(changeid, int):
1611 1613 node = self.changelog.node(changeid)
1612 1614 rev = changeid
1613 1615 elif changeid == b'.':
1614 1616 # this is a hack to delay/avoid loading obsmarkers
1615 1617 # when we know that '.' won't be hidden
1616 1618 node = self.dirstate.p1()
1617 1619 rev = self.unfiltered().changelog.rev(node)
1618 1620 elif len(changeid) == 20:
1619 1621 try:
1620 1622 node = changeid
1621 1623 rev = self.changelog.rev(changeid)
1622 1624 except error.FilteredLookupError:
1623 1625 changeid = hex(changeid) # for the error message
1624 1626 raise
1625 1627 except LookupError:
1626 1628 # check if it might have come from damaged dirstate
1627 1629 #
1628 1630 # XXX we could avoid the unfiltered if we had a recognizable
1629 1631 # exception for filtered changeset access
1630 1632 if (
1631 1633 self.local()
1632 1634 and changeid in self.unfiltered().dirstate.parents()
1633 1635 ):
1634 1636 msg = _(b"working directory has unknown parent '%s'!")
1635 1637 raise error.Abort(msg % short(changeid))
1636 1638 changeid = hex(changeid) # for the error message
1637 1639 raise
1638 1640
1639 1641 elif len(changeid) == 40:
1640 1642 node = bin(changeid)
1641 1643 rev = self.changelog.rev(node)
1642 1644 else:
1643 1645 raise error.ProgrammingError(
1644 1646 b"unsupported changeid '%s' of type %s"
1645 1647 % (changeid, pycompat.bytestr(type(changeid)))
1646 1648 )
1647 1649
1648 1650 return context.changectx(self, rev, node)
1649 1651
1650 1652 except (error.FilteredIndexError, error.FilteredLookupError):
1651 1653 raise error.FilteredRepoLookupError(
1652 1654 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1653 1655 )
1654 1656 except (IndexError, LookupError):
1655 1657 raise error.RepoLookupError(
1656 1658 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1657 1659 )
1658 1660 except error.WdirUnsupported:
1659 1661 return context.workingctx(self)
1660 1662
1661 1663 def __contains__(self, changeid):
1662 1664 """True if the given changeid exists
1663 1665
1664 1666 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1665 1667 is specified.
1666 1668 """
1667 1669 try:
1668 1670 self[changeid]
1669 1671 return True
1670 1672 except error.RepoLookupError:
1671 1673 return False
1672 1674
1673 1675 def __nonzero__(self):
1674 1676 return True
1675 1677
1676 1678 __bool__ = __nonzero__
1677 1679
1678 1680 def __len__(self):
1679 1681 # no need to pay the cost of repoview.changelog
1680 1682 unfi = self.unfiltered()
1681 1683 return len(unfi.changelog)
1682 1684
1683 1685 def __iter__(self):
1684 1686 return iter(self.changelog)
1685 1687
1686 1688 def revs(self, expr, *args):
1687 1689 '''Find revisions matching a revset.
1688 1690
1689 1691 The revset is specified as a string ``expr`` that may contain
1690 1692 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1691 1693
1692 1694 Revset aliases from the configuration are not expanded. To expand
1693 1695 user aliases, consider calling ``scmutil.revrange()`` or
1694 1696 ``repo.anyrevs([expr], user=True)``.
1695 1697
1696 1698 Returns a smartset.abstractsmartset, which is a list-like interface
1697 1699 that contains integer revisions.
1698 1700 '''
1699 1701 tree = revsetlang.spectree(expr, *args)
1700 1702 return revset.makematcher(tree)(self)
1701 1703
1702 1704 def set(self, expr, *args):
1703 1705 '''Find revisions matching a revset and emit changectx instances.
1704 1706
1705 1707 This is a convenience wrapper around ``revs()`` that iterates the
1706 1708 result and is a generator of changectx instances.
1707 1709
1708 1710 Revset aliases from the configuration are not expanded. To expand
1709 1711 user aliases, consider calling ``scmutil.revrange()``.
1710 1712 '''
1711 1713 for r in self.revs(expr, *args):
1712 1714 yield self[r]
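
    # Illustrative usage (comment only): iterate matching changesets as
    # changectx objects, e.g.
    #
    #     for ctx in repo.set(b'head() and public()'):
    #         repo.ui.status(b'%s\n' % ctx.hex())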
1713 1715
1714 1716 def anyrevs(self, specs, user=False, localalias=None):
1715 1717 '''Find revisions matching one of the given revsets.
1716 1718
1717 1719 Revset aliases from the configuration are not expanded by default. To
1718 1720 expand user aliases, specify ``user=True``. To provide some local
1719 1721 definitions overriding user aliases, set ``localalias`` to
1720 1722 ``{name: definitionstring}``.
1721 1723 '''
1722 1724 if specs == [b'null']:
1723 1725 return revset.baseset([nullrev])
1724 1726 if specs == [b'.']:
1725 1727 quick_data = self._quick_access_changeid.get(b'.')
1726 1728 if quick_data is not None:
1727 1729 return revset.baseset([quick_data[0]])
1728 1730 if user:
1729 1731 m = revset.matchany(
1730 1732 self.ui,
1731 1733 specs,
1732 1734 lookup=revset.lookupfn(self),
1733 1735 localalias=localalias,
1734 1736 )
1735 1737 else:
1736 1738 m = revset.matchany(None, specs, localalias=localalias)
1737 1739 return m(self)
1738 1740
1739 1741 def url(self):
1740 1742 return b'file:' + self.root
1741 1743
1742 1744 def hook(self, name, throw=False, **args):
1743 1745 """Call a hook, passing this repo instance.
1744 1746
1745 1747 This is a convenience method to aid invoking hooks. Extensions likely
1746 1748 won't call this unless they have registered a custom hook or are
1747 1749 replacing code that is expected to call a hook.
1748 1750 """
1749 1751 return hook.hook(self.ui, self, name, throw, **args)
1750 1752
1751 1753 @filteredpropertycache
1752 1754 def _tagscache(self):
1753 1755 '''Returns a tagscache object that contains various tags related
1754 1756 caches.'''
1755 1757
1756 1758 # This simplifies its cache management by having one decorated
1757 1759 # function (this one) and the rest simply fetch things from it.
1758 1760 class tagscache(object):
1759 1761 def __init__(self):
1760 1762 # These two define the set of tags for this repository. tags
1761 1763 # maps tag name to node; tagtypes maps tag name to 'global' or
1762 1764 # 'local'. (Global tags are defined by .hgtags across all
1763 1765 # heads, and local tags are defined in .hg/localtags.)
1764 1766 # They constitute the in-memory cache of tags.
1765 1767 self.tags = self.tagtypes = None
1766 1768
1767 1769 self.nodetagscache = self.tagslist = None
1768 1770
1769 1771 cache = tagscache()
1770 1772 cache.tags, cache.tagtypes = self._findtags()
1771 1773
1772 1774 return cache
1773 1775
1774 1776 def tags(self):
1775 1777 '''return a mapping of tag to node'''
1776 1778 t = {}
1777 1779 if self.changelog.filteredrevs:
1778 1780 tags, tt = self._findtags()
1779 1781 else:
1780 1782 tags = self._tagscache.tags
1781 1783 rev = self.changelog.rev
1782 1784 for k, v in pycompat.iteritems(tags):
1783 1785 try:
1784 1786 # ignore tags to unknown nodes
1785 1787 rev(v)
1786 1788 t[k] = v
1787 1789 except (error.LookupError, ValueError):
1788 1790 pass
1789 1791 return t
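    # Illustrative usage sketch (an addition, not from the original source):
    #
    #     for name, node in sorted(repo.tags().items()):
    #         repo.ui.write(b"%s %s\n" % (name, hex(node)))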
1790 1792
1791 1793 def _findtags(self):
1792 1794 '''Do the hard work of finding tags. Return a pair of dicts
1793 1795 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1794 1796 maps tag name to a string like \'global\' or \'local\'.
1795 1797 Subclasses or extensions are free to add their own tags, but
1796 1798 should be aware that the returned dicts will be retained for the
1797 1799 duration of the localrepo object.'''
1798 1800
1799 1801 # XXX what tagtype should subclasses/extensions use? Currently
1800 1802 # mq and bookmarks add tags, but do not set the tagtype at all.
1801 1803 # Should each extension invent its own tag type? Should there
1802 1804 # be one tagtype for all such "virtual" tags? Or is the status
1803 1805 # quo fine?
1804 1806
1805 1807 # map tag name to (node, hist)
1806 1808 alltags = tagsmod.findglobaltags(self.ui, self)
1807 1809 # map tag name to tag type
1808 1810 tagtypes = dict((tag, b'global') for tag in alltags)
1809 1811
1810 1812 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1811 1813
1812 1814 # Build the return dicts. Have to re-encode tag names because
1813 1815 # the tags module always uses UTF-8 (in order not to lose info
1814 1816 # writing to the cache), but the rest of Mercurial wants them in
1815 1817 # local encoding.
1816 1818 tags = {}
1817 1819 for (name, (node, hist)) in pycompat.iteritems(alltags):
1818 1820 if node != nullid:
1819 1821 tags[encoding.tolocal(name)] = node
1820 1822 tags[b'tip'] = self.changelog.tip()
1821 1823 tagtypes = dict(
1822 1824 [
1823 1825 (encoding.tolocal(name), value)
1824 1826 for (name, value) in pycompat.iteritems(tagtypes)
1825 1827 ]
1826 1828 )
1827 1829 return (tags, tagtypes)
1828 1830
1829 1831 def tagtype(self, tagname):
1830 1832 '''
1831 1833 return the type of the given tag. result can be:
1832 1834
1833 1835 'local' : a local tag
1834 1836 'global' : a global tag
1835 1837 None : tag does not exist
1836 1838 '''
1837 1839
1838 1840 return self._tagscache.tagtypes.get(tagname)
1839 1841
1840 1842 def tagslist(self):
1841 1843 '''return a list of tags ordered by revision'''
1842 1844 if not self._tagscache.tagslist:
1843 1845 l = []
1844 1846 for t, n in pycompat.iteritems(self.tags()):
1845 1847 l.append((self.changelog.rev(n), t, n))
1846 1848 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1847 1849
1848 1850 return self._tagscache.tagslist
1849 1851
1850 1852 def nodetags(self, node):
1851 1853 '''return the tags associated with a node'''
1852 1854 if not self._tagscache.nodetagscache:
1853 1855 nodetagscache = {}
1854 1856 for t, n in pycompat.iteritems(self._tagscache.tags):
1855 1857 nodetagscache.setdefault(n, []).append(t)
1856 1858 for tags in pycompat.itervalues(nodetagscache):
1857 1859 tags.sort()
1858 1860 self._tagscache.nodetagscache = nodetagscache
1859 1861 return self._tagscache.nodetagscache.get(node, [])
1860 1862
1861 1863 def nodebookmarks(self, node):
1862 1864 """return the list of bookmarks pointing to the specified node"""
1863 1865 return self._bookmarks.names(node)
1864 1866
1865 1867 def branchmap(self):
1866 1868 '''returns a dictionary {branch: [branchheads]} with branchheads
1867 1869 ordered by increasing revision number'''
1868 1870 return self._branchcaches[self]
1869 1871
1870 1872 @unfilteredmethod
1871 1873 def revbranchcache(self):
1872 1874 if not self._revbranchcache:
1873 1875 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1874 1876 return self._revbranchcache
1875 1877
1876 1878 def branchtip(self, branch, ignoremissing=False):
1877 1879 '''return the tip node for a given branch
1878 1880
1879 1881 If ignoremissing is True, then this method will not raise an error.
1880 1882 This is helpful for callers that only expect None for a missing branch
1881 1883 (e.g. namespace).
1882 1884
1883 1885 '''
1884 1886 try:
1885 1887 return self.branchmap().branchtip(branch)
1886 1888 except KeyError:
1887 1889 if not ignoremissing:
1888 1890 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1889 1891 else:
1890 1892 pass
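    # Illustrative usage sketch (an addition): callers that only want None for
    # an unknown branch pass ignoremissing=True.
    #
    #     node = repo.branchtip(b'default', ignoremissing=True)
    #     if node is None:
    #         pass  # the branch does not exist in this repository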
1891 1893
1892 1894 def lookup(self, key):
1893 1895 node = scmutil.revsymbol(self, key).node()
1894 1896 if node is None:
1895 1897 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1896 1898 return node
1897 1899
1898 1900 def lookupbranch(self, key):
1899 1901 if self.branchmap().hasbranch(key):
1900 1902 return key
1901 1903
1902 1904 return scmutil.revsymbol(self, key).branch()
1903 1905
1904 1906 def known(self, nodes):
1905 1907 cl = self.changelog
1906 1908 get_rev = cl.index.get_rev
1907 1909 filtered = cl.filteredrevs
1908 1910 result = []
1909 1911 for n in nodes:
1910 1912 r = get_rev(n)
1911 1913 resp = not (r is None or r in filtered)
1912 1914 result.append(resp)
1913 1915 return result
1914 1916
1915 1917 def local(self):
1916 1918 return self
1917 1919
1918 1920 def publishing(self):
1919 1921 # it's safe (and desirable) to trust the publish flag unconditionally
1920 1922 # so that we don't finalize changes shared between users via ssh or nfs
1921 1923 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1922 1924
1923 1925 def cancopy(self):
1924 1926 # so statichttprepo's override of local() works
1925 1927 if not self.local():
1926 1928 return False
1927 1929 if not self.publishing():
1928 1930 return True
1929 1931 # if publishing we can't copy if there is filtered content
1930 1932 return not self.filtered(b'visible').changelog.filteredrevs
1931 1933
1932 1934 def shared(self):
1933 1935 '''the type of shared repository (None if not shared)'''
1934 1936 if self.sharedpath != self.path:
1935 1937 return b'store'
1936 1938 return None
1937 1939
1938 1940 def wjoin(self, f, *insidef):
1939 1941 return self.vfs.reljoin(self.root, f, *insidef)
1940 1942
1941 1943 def setparents(self, p1, p2=nullid):
1942 1944 self[None].setparents(p1, p2)
1943 1945 self._quick_access_changeid_invalidate()
1944 1946
1945 1947 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1946 1948 """changeid must be a changeset revision, if specified.
1947 1949 fileid can be a file revision or node."""
1948 1950 return context.filectx(
1949 1951 self, path, changeid, fileid, changectx=changectx
1950 1952 )
1951 1953
1952 1954 def getcwd(self):
1953 1955 return self.dirstate.getcwd()
1954 1956
1955 1957 def pathto(self, f, cwd=None):
1956 1958 return self.dirstate.pathto(f, cwd)
1957 1959
1958 1960 def _loadfilter(self, filter):
1959 1961 if filter not in self._filterpats:
1960 1962 l = []
1961 1963 for pat, cmd in self.ui.configitems(filter):
1962 1964 if cmd == b'!':
1963 1965 continue
1964 1966 mf = matchmod.match(self.root, b'', [pat])
1965 1967 fn = None
1966 1968 params = cmd
1967 1969 for name, filterfn in pycompat.iteritems(self._datafilters):
1968 1970 if cmd.startswith(name):
1969 1971 fn = filterfn
1970 1972 params = cmd[len(name) :].lstrip()
1971 1973 break
1972 1974 if not fn:
1973 1975 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1974 1976 fn.__name__ = 'commandfilter'
1975 1977 # Wrap old filters not supporting keyword arguments
1976 1978 if not pycompat.getargspec(fn)[2]:
1977 1979 oldfn = fn
1978 1980 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1979 1981 fn.__name__ = 'compat-' + oldfn.__name__
1980 1982 l.append((mf, fn, params))
1981 1983 self._filterpats[filter] = l
1982 1984 return self._filterpats[filter]
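    # Illustrative configuration sketch (an addition; the pattern and commands
    # are hypothetical): the [encode]/[decode] sections consumed here map file
    # patterns to filter commands, e.g. in an hgrc:
    #
    #     [encode]
    #     *.txt = dos2unix
    #
    #     [decode]
    #     *.txt = unix2dos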
1983 1985
1984 1986 def _filter(self, filterpats, filename, data):
1985 1987 for mf, fn, cmd in filterpats:
1986 1988 if mf(filename):
1987 1989 self.ui.debug(
1988 1990 b"filtering %s through %s\n"
1989 1991 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1990 1992 )
1991 1993 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1992 1994 break
1993 1995
1994 1996 return data
1995 1997
1996 1998 @unfilteredpropertycache
1997 1999 def _encodefilterpats(self):
1998 2000 return self._loadfilter(b'encode')
1999 2001
2000 2002 @unfilteredpropertycache
2001 2003 def _decodefilterpats(self):
2002 2004 return self._loadfilter(b'decode')
2003 2005
2004 2006 def adddatafilter(self, name, filter):
2005 2007 self._datafilters[name] = filter
2006 2008
2007 2009 def wread(self, filename):
2008 2010 if self.wvfs.islink(filename):
2009 2011 data = self.wvfs.readlink(filename)
2010 2012 else:
2011 2013 data = self.wvfs.read(filename)
2012 2014 return self._filter(self._encodefilterpats, filename, data)
2013 2015
2014 2016 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2015 2017 """write ``data`` into ``filename`` in the working directory
2016 2018
2017 2019 This returns length of written (maybe decoded) data.
2018 2020 """
2019 2021 data = self._filter(self._decodefilterpats, filename, data)
2020 2022 if b'l' in flags:
2021 2023 self.wvfs.symlink(data, filename)
2022 2024 else:
2023 2025 self.wvfs.write(
2024 2026 filename, data, backgroundclose=backgroundclose, **kwargs
2025 2027 )
2026 2028 if b'x' in flags:
2027 2029 self.wvfs.setflags(filename, False, True)
2028 2030 else:
2029 2031 self.wvfs.setflags(filename, False, False)
2030 2032 return len(data)
2031 2033
2032 2034 def wwritedata(self, filename, data):
2033 2035 return self._filter(self._decodefilterpats, filename, data)
2034 2036
2035 2037 def currenttransaction(self):
2036 2038 """return the current transaction or None if none exists"""
2037 2039 if self._transref:
2038 2040 tr = self._transref()
2039 2041 else:
2040 2042 tr = None
2041 2043
2042 2044 if tr and tr.running():
2043 2045 return tr
2044 2046 return None
2045 2047
2046 2048 def transaction(self, desc, report=None):
2047 2049 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2048 2050 b'devel', b'check-locks'
2049 2051 ):
2050 2052 if self._currentlock(self._lockref) is None:
2051 2053 raise error.ProgrammingError(b'transaction requires locking')
2052 2054 tr = self.currenttransaction()
2053 2055 if tr is not None:
2054 2056 return tr.nest(name=desc)
2055 2057
2056 2058 # abort here if the journal already exists
2057 2059 if self.svfs.exists(b"journal"):
2058 2060 raise error.RepoError(
2059 2061 _(b"abandoned transaction found"),
2060 2062 hint=_(b"run 'hg recover' to clean up transaction"),
2061 2063 )
2062 2064
2063 2065 idbase = b"%.40f#%f" % (random.random(), time.time())
2064 2066 ha = hex(hashutil.sha1(idbase).digest())
2065 2067 txnid = b'TXN:' + ha
2066 2068 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2067 2069
2068 2070 self._writejournal(desc)
2069 2071 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2070 2072 if report:
2071 2073 rp = report
2072 2074 else:
2073 2075 rp = self.ui.warn
2074 2076 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2075 2077 # we must avoid cyclic reference between repo and transaction.
2076 2078 reporef = weakref.ref(self)
2077 2079 # Code to track tag movement
2078 2080 #
2079 2081 # Since tags are all handled as file content, it is actually quite hard
2080 2082 # to track these movements from a code perspective. So we fall back to
2081 2083 # tracking at the repository level. One could envision tracking changes
2082 2084 # to the '.hgtags' file through changegroup apply, but that fails to
2083 2085 # cope with cases where a transaction exposes new heads without a
2084 2086 # changegroup being involved (eg: phase movement).
2085 2087 #
2086 2088 # For now, we gate the feature behind a flag since it likely comes
2087 2089 # with performance impacts. The current code runs more often than needed
2088 2090 # and does not use caches as much as it could. The current focus is on
2089 2091 # the behavior of the feature, so we disable it by default. The flag
2090 2092 # will be removed when we are happy with the performance impact.
2091 2093 #
2092 2094 # Once this feature is no longer experimental move the following
2093 2095 # documentation to the appropriate help section:
2094 2096 #
2095 2097 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2096 2098 # tags (new or changed or deleted tags). In addition the details of
2097 2099 # these changes are made available in a file at:
2098 2100 # ``REPOROOT/.hg/changes/tags.changes``.
2099 2101 # Make sure you check for HG_TAG_MOVED before reading that file as it
2100 2102 # might exist from a previous transaction even if no tags were touched
2101 2103 # in this one. Changes are recorded in a line-based format::
2102 2104 #
2103 2105 # <action> <hex-node> <tag-name>\n
2104 2106 #
2105 2107 # Actions are defined as follows:
2106 2108 # "-R": tag is removed,
2107 2109 # "+A": tag is added,
2108 2110 # "-M": tag is moved (old value),
2109 2111 # "+M": tag is moved (new value),
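        # An illustrative consumer sketch (an addition; the hook body is
        # hypothetical and assumes ``os`` is available and ``repo_root``
        # points at the repository): a txnclose hook could parse the file
        # along these lines, matching the line format documented above.
        #
        #     if os.environ.get('HG_TAG_MOVED'):
        #         path = os.path.join(repo_root, '.hg', 'changes', 'tags.changes')
        #         with open(path, 'rb') as fp:
        #             for line in fp:
        #                 action, hexnode, tag = line.rstrip(b'\n').split(b' ', 2)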
2110 2112 tracktags = lambda x: None
2111 2113 # experimental config: experimental.hook-track-tags
2112 2114 shouldtracktags = self.ui.configbool(
2113 2115 b'experimental', b'hook-track-tags'
2114 2116 )
2115 2117 if desc != b'strip' and shouldtracktags:
2116 2118 oldheads = self.changelog.headrevs()
2117 2119
2118 2120 def tracktags(tr2):
2119 2121 repo = reporef()
2120 2122 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2121 2123 newheads = repo.changelog.headrevs()
2122 2124 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2123 2125 # note: we compare lists here.
2124 2126 # As we do it only once, building a set would not be cheaper
2125 2127 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2126 2128 if changes:
2127 2129 tr2.hookargs[b'tag_moved'] = b'1'
2128 2130 with repo.vfs(
2129 2131 b'changes/tags.changes', b'w', atomictemp=True
2130 2132 ) as changesfile:
2131 2133 # note: we do not register the file to the transaction
2132 2134 # because we need it to still exist when the transaction
2133 2135 # is closed (for txnclose hooks)
2134 2136 tagsmod.writediff(changesfile, changes)
2135 2137
2136 2138 def validate(tr2):
2137 2139 """will run pre-closing hooks"""
2138 2140 # XXX the transaction API is a bit lacking here so we take a hacky
2139 2141 # path for now
2140 2142 #
2141 2143 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2142 2144 # dict is copied before these run. In addition we need the data
2143 2145 # available to in-memory hooks too.
2144 2146 #
2145 2147 # Moreover, we also need to make sure this runs before txnclose
2146 2148 # hooks and there is no "pending" mechanism that would execute
2147 2149 # logic only if hooks are about to run.
2148 2150 #
2149 2151 # Fixing this limitation of the transaction is also needed to track
2150 2152 # other families of changes (bookmarks, phases, obsolescence).
2151 2153 #
2152 2154 # This will have to be fixed before we remove the experimental
2153 2155 # gating.
2154 2156 tracktags(tr2)
2155 2157 repo = reporef()
2156 2158
2157 2159 singleheadopt = (b'experimental', b'single-head-per-branch')
2158 2160 singlehead = repo.ui.configbool(*singleheadopt)
2159 2161 if singlehead:
2160 2162 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2161 2163 accountclosed = singleheadsub.get(
2162 2164 b"account-closed-heads", False
2163 2165 )
2164 2166 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2165 2167 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2166 2168 for name, (old, new) in sorted(
2167 2169 tr.changes[b'bookmarks'].items()
2168 2170 ):
2169 2171 args = tr.hookargs.copy()
2170 2172 args.update(bookmarks.preparehookargs(name, old, new))
2171 2173 repo.hook(
2172 2174 b'pretxnclose-bookmark',
2173 2175 throw=True,
2174 2176 **pycompat.strkwargs(args)
2175 2177 )
2176 2178 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2177 2179 cl = repo.unfiltered().changelog
2178 2180 for rev, (old, new) in tr.changes[b'phases'].items():
2179 2181 args = tr.hookargs.copy()
2180 2182 node = hex(cl.node(rev))
2181 2183 args.update(phases.preparehookargs(node, old, new))
2182 2184 repo.hook(
2183 2185 b'pretxnclose-phase',
2184 2186 throw=True,
2185 2187 **pycompat.strkwargs(args)
2186 2188 )
2187 2189
2188 2190 repo.hook(
2189 2191 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2190 2192 )
2191 2193
2192 2194 def releasefn(tr, success):
2193 2195 repo = reporef()
2194 2196 if repo is None:
2195 2197 # If the repo has been GC'd (and this release function is being
2196 2198 # called from transaction.__del__), there's not much we can do,
2197 2199 # so just leave the unfinished transaction there and let the
2198 2200 # user run `hg recover`.
2199 2201 return
2200 2202 if success:
2201 2203 # this should be explicitly invoked here, because
2202 2204 # in-memory changes aren't written out when closing
2203 2205 # the transaction if tr.addfilegenerator (via
2204 2206 # dirstate.write or so) isn't invoked while the
2205 2207 # transaction is running
2206 2208 repo.dirstate.write(None)
2207 2209 else:
2208 2210 # discard all changes (including ones already written
2209 2211 # out) in this transaction
2210 2212 narrowspec.restorebackup(self, b'journal.narrowspec')
2211 2213 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2212 2214 repo.dirstate.restorebackup(None, b'journal.dirstate')
2213 2215
2214 2216 repo.invalidate(clearfilecache=True)
2215 2217
2216 2218 tr = transaction.transaction(
2217 2219 rp,
2218 2220 self.svfs,
2219 2221 vfsmap,
2220 2222 b"journal",
2221 2223 b"undo",
2222 2224 aftertrans(renames),
2223 2225 self.store.createmode,
2224 2226 validator=validate,
2225 2227 releasefn=releasefn,
2226 2228 checkambigfiles=_cachedfiles,
2227 2229 name=desc,
2228 2230 )
2229 2231 tr.changes[b'origrepolen'] = len(self)
2230 2232 tr.changes[b'obsmarkers'] = set()
2231 2233 tr.changes[b'phases'] = {}
2232 2234 tr.changes[b'bookmarks'] = {}
2233 2235
2234 2236 tr.hookargs[b'txnid'] = txnid
2235 2237 tr.hookargs[b'txnname'] = desc
2236 2238 # note: writing the fncache only during finalize means that the file is
2237 2239 # outdated when running hooks. As fncache is used for streaming clone,
2238 2240 # this is not expected to break anything that happens during the hooks.
2239 2241 tr.addfinalize(b'flush-fncache', self.store.write)
2240 2242
2241 2243 def txnclosehook(tr2):
2242 2244 """To be run if the transaction is successful; will schedule a hook run
2243 2245 """
2244 2246 # Don't reference tr2 in hook() so we don't hold a reference.
2245 2247 # This reduces memory consumption when there are multiple
2246 2248 # transactions per lock. This can likely go away if issue5045
2247 2249 # fixes the function accumulation.
2248 2250 hookargs = tr2.hookargs
2249 2251
2250 2252 def hookfunc(unused_success):
2251 2253 repo = reporef()
2252 2254 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2253 2255 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2254 2256 for name, (old, new) in bmchanges:
2255 2257 args = tr.hookargs.copy()
2256 2258 args.update(bookmarks.preparehookargs(name, old, new))
2257 2259 repo.hook(
2258 2260 b'txnclose-bookmark',
2259 2261 throw=False,
2260 2262 **pycompat.strkwargs(args)
2261 2263 )
2262 2264
2263 2265 if hook.hashook(repo.ui, b'txnclose-phase'):
2264 2266 cl = repo.unfiltered().changelog
2265 2267 phasemv = sorted(tr.changes[b'phases'].items())
2266 2268 for rev, (old, new) in phasemv:
2267 2269 args = tr.hookargs.copy()
2268 2270 node = hex(cl.node(rev))
2269 2271 args.update(phases.preparehookargs(node, old, new))
2270 2272 repo.hook(
2271 2273 b'txnclose-phase',
2272 2274 throw=False,
2273 2275 **pycompat.strkwargs(args)
2274 2276 )
2275 2277
2276 2278 repo.hook(
2277 2279 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2278 2280 )
2279 2281
2280 2282 reporef()._afterlock(hookfunc)
2281 2283
2282 2284 tr.addfinalize(b'txnclose-hook', txnclosehook)
2283 2285 # Include a leading "-" to make it happen before the transaction summary
2284 2286 # reports registered via scmutil.registersummarycallback() whose names
2285 2287 # are 00-txnreport etc. That way, the caches will be warm when the
2286 2288 # callbacks run.
2287 2289 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2288 2290
2289 2291 def txnaborthook(tr2):
2290 2292 """To be run if transaction is aborted
2291 2293 """
2292 2294 reporef().hook(
2293 2295 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2294 2296 )
2295 2297
2296 2298 tr.addabort(b'txnabort-hook', txnaborthook)
2297 2299 # avoid eager cache invalidation. in-memory data should be identical
2298 2300 # to stored data if transaction has no error.
2299 2301 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2300 2302 self._transref = weakref.ref(tr)
2301 2303 scmutil.registersummarycallback(self, tr, desc)
2302 2304 return tr
2303 2305
2304 2306 def _journalfiles(self):
2305 2307 return (
2306 2308 (self.svfs, b'journal'),
2307 2309 (self.svfs, b'journal.narrowspec'),
2308 2310 (self.vfs, b'journal.narrowspec.dirstate'),
2309 2311 (self.vfs, b'journal.dirstate'),
2310 2312 (self.vfs, b'journal.branch'),
2311 2313 (self.vfs, b'journal.desc'),
2312 2314 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2313 2315 (self.svfs, b'journal.phaseroots'),
2314 2316 )
2315 2317
2316 2318 def undofiles(self):
2317 2319 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2318 2320
2319 2321 @unfilteredmethod
2320 2322 def _writejournal(self, desc):
2321 2323 self.dirstate.savebackup(None, b'journal.dirstate')
2322 2324 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2323 2325 narrowspec.savebackup(self, b'journal.narrowspec')
2324 2326 self.vfs.write(
2325 2327 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2326 2328 )
2327 2329 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2328 2330 bookmarksvfs = bookmarks.bookmarksvfs(self)
2329 2331 bookmarksvfs.write(
2330 2332 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2331 2333 )
2332 2334 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2333 2335
2334 2336 def recover(self):
2335 2337 with self.lock():
2336 2338 if self.svfs.exists(b"journal"):
2337 2339 self.ui.status(_(b"rolling back interrupted transaction\n"))
2338 2340 vfsmap = {
2339 2341 b'': self.svfs,
2340 2342 b'plain': self.vfs,
2341 2343 }
2342 2344 transaction.rollback(
2343 2345 self.svfs,
2344 2346 vfsmap,
2345 2347 b"journal",
2346 2348 self.ui.warn,
2347 2349 checkambigfiles=_cachedfiles,
2348 2350 )
2349 2351 self.invalidate()
2350 2352 return True
2351 2353 else:
2352 2354 self.ui.warn(_(b"no interrupted transaction available\n"))
2353 2355 return False
2354 2356
2355 2357 def rollback(self, dryrun=False, force=False):
2356 2358 wlock = lock = dsguard = None
2357 2359 try:
2358 2360 wlock = self.wlock()
2359 2361 lock = self.lock()
2360 2362 if self.svfs.exists(b"undo"):
2361 2363 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2362 2364
2363 2365 return self._rollback(dryrun, force, dsguard)
2364 2366 else:
2365 2367 self.ui.warn(_(b"no rollback information available\n"))
2366 2368 return 1
2367 2369 finally:
2368 2370 release(dsguard, lock, wlock)
2369 2371
2370 2372 @unfilteredmethod # Until we get smarter cache management
2371 2373 def _rollback(self, dryrun, force, dsguard):
2372 2374 ui = self.ui
2373 2375 try:
2374 2376 args = self.vfs.read(b'undo.desc').splitlines()
2375 2377 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2376 2378 if len(args) >= 3:
2377 2379 detail = args[2]
2378 2380 oldtip = oldlen - 1
2379 2381
2380 2382 if detail and ui.verbose:
2381 2383 msg = _(
2382 2384 b'repository tip rolled back to revision %d'
2383 2385 b' (undo %s: %s)\n'
2384 2386 ) % (oldtip, desc, detail)
2385 2387 else:
2386 2388 msg = _(
2387 2389 b'repository tip rolled back to revision %d (undo %s)\n'
2388 2390 ) % (oldtip, desc)
2389 2391 except IOError:
2390 2392 msg = _(b'rolling back unknown transaction\n')
2391 2393 desc = None
2392 2394
2393 2395 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2394 2396 raise error.Abort(
2395 2397 _(
2396 2398 b'rollback of last commit while not checked out '
2397 2399 b'may lose data'
2398 2400 ),
2399 2401 hint=_(b'use -f to force'),
2400 2402 )
2401 2403
2402 2404 ui.status(msg)
2403 2405 if dryrun:
2404 2406 return 0
2405 2407
2406 2408 parents = self.dirstate.parents()
2407 2409 self.destroying()
2408 2410 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2409 2411 transaction.rollback(
2410 2412 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2411 2413 )
2412 2414 bookmarksvfs = bookmarks.bookmarksvfs(self)
2413 2415 if bookmarksvfs.exists(b'undo.bookmarks'):
2414 2416 bookmarksvfs.rename(
2415 2417 b'undo.bookmarks', b'bookmarks', checkambig=True
2416 2418 )
2417 2419 if self.svfs.exists(b'undo.phaseroots'):
2418 2420 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2419 2421 self.invalidate()
2420 2422
2421 2423 has_node = self.changelog.index.has_node
2422 2424 parentgone = any(not has_node(p) for p in parents)
2423 2425 if parentgone:
2424 2426 # prevent dirstateguard from overwriting already restored one
2425 2427 dsguard.close()
2426 2428
2427 2429 narrowspec.restorebackup(self, b'undo.narrowspec')
2428 2430 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2429 2431 self.dirstate.restorebackup(None, b'undo.dirstate')
2430 2432 try:
2431 2433 branch = self.vfs.read(b'undo.branch')
2432 2434 self.dirstate.setbranch(encoding.tolocal(branch))
2433 2435 except IOError:
2434 2436 ui.warn(
2435 2437 _(
2436 2438 b'named branch could not be reset: '
2437 2439 b'current branch is still \'%s\'\n'
2438 2440 )
2439 2441 % self.dirstate.branch()
2440 2442 )
2441 2443
2442 2444 parents = tuple([p.rev() for p in self[None].parents()])
2443 2445 if len(parents) > 1:
2444 2446 ui.status(
2445 2447 _(
2446 2448 b'working directory now based on '
2447 2449 b'revisions %d and %d\n'
2448 2450 )
2449 2451 % parents
2450 2452 )
2451 2453 else:
2452 2454 ui.status(
2453 2455 _(b'working directory now based on revision %d\n') % parents
2454 2456 )
2455 2457 mergemod.mergestate.clean(self, self[b'.'].node())
2456 2458
2457 2459 # TODO: if we know which new heads may result from this rollback, pass
2458 2460 # them to destroy(), which will prevent the branchhead cache from being
2459 2461 # invalidated.
2460 2462 self.destroyed()
2461 2463 return 0
2462 2464
2463 2465 def _buildcacheupdater(self, newtransaction):
2464 2466 """called during transaction to build the callback updating cache
2465 2467
2466 2468 Lives on the repository to help extensions that might want to augment
2467 2469 this logic. For this purpose, the created transaction is passed to the
2468 2470 method.
2469 2471 """
2470 2472 # we must avoid cyclic reference between repo and transaction.
2471 2473 reporef = weakref.ref(self)
2472 2474
2473 2475 def updater(tr):
2474 2476 repo = reporef()
2475 2477 repo.updatecaches(tr)
2476 2478
2477 2479 return updater
2478 2480
2479 2481 @unfilteredmethod
2480 2482 def updatecaches(self, tr=None, full=False):
2481 2483 """warm appropriate caches
2482 2484
2483 2485 If this function is called after a transaction has closed, the transaction
2484 2486 will be available in the 'tr' argument. This can be used to selectively
2485 2487 update caches relevant to the changes in that transaction.
2486 2488
2487 2489 If 'full' is set, make sure all caches the function knows about have
2488 2490 up-to-date data. Even the ones usually loaded more lazily.
2489 2491 """
2490 2492 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2491 2493 # During strip, many caches are invalid but
2492 2494 # later call to `destroyed` will refresh them.
2493 2495 return
2494 2496
2495 2497 if tr is None or tr.changes[b'origrepolen'] < len(self):
2496 2498 # accessing the 'served' branchmap should refresh all the others,
2497 2499 self.ui.debug(b'updating the branch cache\n')
2498 2500 self.filtered(b'served').branchmap()
2499 2501 self.filtered(b'served.hidden').branchmap()
2500 2502
2501 2503 if full:
2502 2504 unfi = self.unfiltered()
2503 2505 rbc = unfi.revbranchcache()
2504 2506 for r in unfi.changelog:
2505 2507 rbc.branchinfo(r)
2506 2508 rbc.write()
2507 2509
2508 2510 # ensure the working copy parents are in the manifestfulltextcache
2509 2511 for ctx in self[b'.'].parents():
2510 2512 ctx.manifest() # accessing the manifest is enough
2511 2513
2512 2514 # accessing fnode cache warms the cache
2513 2515 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2514 2516 # accessing tags warm the cache
2515 2517 self.tags()
2516 2518 self.filtered(b'served').tags()
2517 2519
2518 2520 # The `full` arg is documented as updating even the lazily-loaded
2519 2521 # caches immediately, so we're forcing a write to cause these caches
2520 2522 # to be warmed up even if they haven't explicitly been requested
2521 2523 # yet (if they've never been used by hg, they won't ever have been
2522 2524 # written, even if they're a subset of another kind of cache that
2523 2525 # *has* been used).
2524 2526 for filt in repoview.filtertable.keys():
2525 2527 filtered = self.filtered(filt)
2526 2528 filtered.branchmap().write(filtered)
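    # Illustrative usage sketch (an addition): warming every known cache, e.g.
    # from a maintenance task, might look like:
    #
    #     with repo.lock():
    #         repo.updatecaches(full=True)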
2527 2529
2528 2530 def invalidatecaches(self):
2529 2531
2530 2532 if '_tagscache' in vars(self):
2531 2533 # can't use delattr on proxy
2532 2534 del self.__dict__['_tagscache']
2533 2535
2534 2536 self._branchcaches.clear()
2535 2537 self.invalidatevolatilesets()
2536 2538 self._sparsesignaturecache.clear()
2537 2539
2538 2540 def invalidatevolatilesets(self):
2539 2541 self.filteredrevcache.clear()
2540 2542 obsolete.clearobscaches(self)
2541 2543 self._quick_access_changeid_invalidate()
2542 2544
2543 2545 def invalidatedirstate(self):
2544 2546 '''Invalidates the dirstate, causing the next call to dirstate
2545 2547 to check if it was modified since the last time it was read,
2546 2548 rereading it if it has.
2547 2549
2548 2550 This is different from dirstate.invalidate() in that it doesn't always
2549 2551 reread the dirstate. Use dirstate.invalidate() if you want to
2550 2552 explicitly read the dirstate again (i.e. restoring it to a previous
2551 2553 known good state).'''
2552 2554 if hasunfilteredcache(self, 'dirstate'):
2553 2555 for k in self.dirstate._filecache:
2554 2556 try:
2555 2557 delattr(self.dirstate, k)
2556 2558 except AttributeError:
2557 2559 pass
2558 2560 delattr(self.unfiltered(), 'dirstate')
2559 2561
2560 2562 def invalidate(self, clearfilecache=False):
2561 2563 '''Invalidates both store and non-store parts other than dirstate
2562 2564
2563 2565 If a transaction is running, invalidation of store is omitted,
2564 2566 because discarding in-memory changes might cause inconsistency
2565 2567 (e.g. incomplete fncache causes unintentional failure, but
2566 2568 redundant one doesn't).
2567 2569 '''
2568 2570 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2569 2571 for k in list(self._filecache.keys()):
2570 2572 # dirstate is invalidated separately in invalidatedirstate()
2571 2573 if k == b'dirstate':
2572 2574 continue
2573 2575 if (
2574 2576 k == b'changelog'
2575 2577 and self.currenttransaction()
2576 2578 and self.changelog._delayed
2577 2579 ):
2578 2580 # The changelog object may store unwritten revisions. We don't
2579 2581 # want to lose them.
2580 2582 # TODO: Solve the problem instead of working around it.
2581 2583 continue
2582 2584
2583 2585 if clearfilecache:
2584 2586 del self._filecache[k]
2585 2587 try:
2586 2588 delattr(unfiltered, k)
2587 2589 except AttributeError:
2588 2590 pass
2589 2591 self.invalidatecaches()
2590 2592 if not self.currenttransaction():
2591 2593 # TODO: Changing contents of store outside transaction
2592 2594 # causes inconsistency. We should make in-memory store
2593 2595 # changes detectable, and abort if changed.
2594 2596 self.store.invalidatecaches()
2595 2597
2596 2598 def invalidateall(self):
2597 2599 '''Fully invalidates both store and non-store parts, causing the
2598 2600 subsequent operation to reread any outside changes.'''
2599 2601 # extension should hook this to invalidate its caches
2600 2602 self.invalidate()
2601 2603 self.invalidatedirstate()
2602 2604
2603 2605 @unfilteredmethod
2604 2606 def _refreshfilecachestats(self, tr):
2605 2607 """Reload stats of cached files so that they are flagged as valid"""
2606 2608 for k, ce in self._filecache.items():
2607 2609 k = pycompat.sysstr(k)
2608 2610 if k == 'dirstate' or k not in self.__dict__:
2609 2611 continue
2610 2612 ce.refresh()
2611 2613
2612 2614 def _lock(
2613 2615 self,
2614 2616 vfs,
2615 2617 lockname,
2616 2618 wait,
2617 2619 releasefn,
2618 2620 acquirefn,
2619 2621 desc,
2620 2622 inheritchecker=None,
2621 2623 parentenvvar=None,
2622 2624 ):
2623 2625 parentlock = None
2624 2626 # the contents of parentenvvar are used by the underlying lock to
2625 2627 # determine whether it can be inherited
2626 2628 if parentenvvar is not None:
2627 2629 parentlock = encoding.environ.get(parentenvvar)
2628 2630
2629 2631 timeout = 0
2630 2632 warntimeout = 0
2631 2633 if wait:
2632 2634 timeout = self.ui.configint(b"ui", b"timeout")
2633 2635 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2634 2636 # internal config: ui.signal-safe-lock
2635 2637 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2636 2638
2637 2639 l = lockmod.trylock(
2638 2640 self.ui,
2639 2641 vfs,
2640 2642 lockname,
2641 2643 timeout,
2642 2644 warntimeout,
2643 2645 releasefn=releasefn,
2644 2646 acquirefn=acquirefn,
2645 2647 desc=desc,
2646 2648 inheritchecker=inheritchecker,
2647 2649 parentlock=parentlock,
2648 2650 signalsafe=signalsafe,
2649 2651 )
2650 2652 return l
2651 2653
2652 2654 def _afterlock(self, callback):
2653 2655 """add a callback to be run when the repository is fully unlocked
2654 2656
2655 2657 The callback will be executed when the outermost lock is released
2656 2658 (with wlock being higher level than 'lock')."""
2657 2659 for ref in (self._wlockref, self._lockref):
2658 2660 l = ref and ref()
2659 2661 if l and l.held:
2660 2662 l.postrelease.append(callback)
2661 2663 break
2662 2664 else: # no lock has been found.
2663 2665 callback(True)
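    # Illustrative usage sketch (an addition; the callback is hypothetical):
    # schedule work for when the outermost lock is released, as the txnclose
    # hook machinery above does.
    #
    #     repo._afterlock(lambda success: repo.ui.status(b'fully unlocked\n'))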
2664 2666
2665 2667 def lock(self, wait=True):
2666 2668 '''Lock the repository store (.hg/store) and return a weak reference
2667 2669 to the lock. Use this before modifying the store (e.g. committing or
2668 2670 stripping). If you are opening a transaction, get a lock as well.
2669 2671
2670 2672 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2671 2673 'wlock' first to avoid a dead-lock hazard.'''
2672 2674 l = self._currentlock(self._lockref)
2673 2675 if l is not None:
2674 2676 l.lock()
2675 2677 return l
2676 2678
2677 2679 l = self._lock(
2678 2680 vfs=self.svfs,
2679 2681 lockname=b"lock",
2680 2682 wait=wait,
2681 2683 releasefn=None,
2682 2684 acquirefn=self.invalidate,
2683 2685 desc=_(b'repository %s') % self.origroot,
2684 2686 )
2685 2687 self._lockref = weakref.ref(l)
2686 2688 return l
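    # Illustrative usage sketch (an addition, mirroring the documented
    # ordering): take 'wlock' before 'lock' before opening a transaction.
    #
    #     with repo.wlock(), repo.lock(), repo.transaction(b'my-change'):
    #         pass  # mutate the store here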
2687 2689
2688 2690 def _wlockchecktransaction(self):
2689 2691 if self.currenttransaction() is not None:
2690 2692 raise error.LockInheritanceContractViolation(
2691 2693 b'wlock cannot be inherited in the middle of a transaction'
2692 2694 )
2693 2695
2694 2696 def wlock(self, wait=True):
2695 2697 '''Lock the non-store parts of the repository (everything under
2696 2698 .hg except .hg/store) and return a weak reference to the lock.
2697 2699
2698 2700 Use this before modifying files in .hg.
2699 2701
2700 2702 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2701 2703 'wlock' first to avoid a dead-lock hazard.'''
2702 2704 l = self._wlockref and self._wlockref()
2703 2705 if l is not None and l.held:
2704 2706 l.lock()
2705 2707 return l
2706 2708
2707 2709 # We do not need to check for non-waiting lock acquisition. Such
2708 2710 # acquisition would not cause dead-lock as they would just fail.
2709 2711 if wait and (
2710 2712 self.ui.configbool(b'devel', b'all-warnings')
2711 2713 or self.ui.configbool(b'devel', b'check-locks')
2712 2714 ):
2713 2715 if self._currentlock(self._lockref) is not None:
2714 2716 self.ui.develwarn(b'"wlock" acquired after "lock"')
2715 2717
2716 2718 def unlock():
2717 2719 if self.dirstate.pendingparentchange():
2718 2720 self.dirstate.invalidate()
2719 2721 else:
2720 2722 self.dirstate.write(None)
2721 2723
2722 2724 self._filecache[b'dirstate'].refresh()
2723 2725
2724 2726 l = self._lock(
2725 2727 self.vfs,
2726 2728 b"wlock",
2727 2729 wait,
2728 2730 unlock,
2729 2731 self.invalidatedirstate,
2730 2732 _(b'working directory of %s') % self.origroot,
2731 2733 inheritchecker=self._wlockchecktransaction,
2732 2734 parentenvvar=b'HG_WLOCK_LOCKER',
2733 2735 )
2734 2736 self._wlockref = weakref.ref(l)
2735 2737 return l
2736 2738
2737 2739 def _currentlock(self, lockref):
2738 2740 """Returns the lock if it's held, or None if it's not."""
2739 2741 if lockref is None:
2740 2742 return None
2741 2743 l = lockref()
2742 2744 if l is None or not l.held:
2743 2745 return None
2744 2746 return l
2745 2747
2746 2748 def currentwlock(self):
2747 2749 """Returns the wlock if it's held, or None if it's not."""
2748 2750 return self._currentlock(self._wlockref)
2749 2751
2750 2752 def _filecommit(
2751 2753 self,
2752 2754 fctx,
2753 2755 manifest1,
2754 2756 manifest2,
2755 2757 linkrev,
2756 2758 tr,
2757 2759 changelist,
2758 2760 includecopymeta,
2759 2761 ):
2760 2762 """
2761 2763 commit an individual file as part of a larger transaction
2762 2764 """
2763 2765
2764 2766 fname = fctx.path()
2765 2767 fparent1 = manifest1.get(fname, nullid)
2766 2768 fparent2 = manifest2.get(fname, nullid)
2767 2769 if isinstance(fctx, context.filectx):
2768 2770 node = fctx.filenode()
2769 2771 if node in [fparent1, fparent2]:
2770 2772 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2771 2773 if (
2772 2774 fparent1 != nullid
2773 2775 and manifest1.flags(fname) != fctx.flags()
2774 2776 ) or (
2775 2777 fparent2 != nullid
2776 2778 and manifest2.flags(fname) != fctx.flags()
2777 2779 ):
2778 2780 changelist.append(fname)
2779 2781 return node
2780 2782
2781 2783 flog = self.file(fname)
2782 2784 meta = {}
2783 2785 cfname = fctx.copysource()
2784 2786 if cfname and cfname != fname:
2785 2787 # Mark the new revision of this file as a copy of another
2786 2788 # file. This copy data will effectively act as a parent
2787 2789 # of this new revision. If this is a merge, the first
2788 2790 # parent will be the nullid (meaning "look up the copy data")
2789 2791 # and the second one will be the other parent. For example:
2790 2792 #
2791 2793 # 0 --- 1 --- 3 rev1 changes file foo
2792 2794 # \ / rev2 renames foo to bar and changes it
2793 2795 # \- 2 -/ rev3 should have bar with all changes and
2794 2796 # should record that bar descends from
2795 2797 # bar in rev2 and foo in rev1
2796 2798 #
2797 2799 # this allows this merge to succeed:
2798 2800 #
2799 2801 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2800 2802 # \ / merging rev3 and rev4 should use bar@rev2
2801 2803 # \- 2 --- 4 as the merge base
2802 2804 #
2803 2805
2804 2806 cnode = manifest1.get(cfname)
2805 2807 newfparent = fparent2
2806 2808
2807 2809 if manifest2: # branch merge
2808 2810 if fparent2 == nullid or cnode is None: # copied on remote side
2809 2811 if cfname in manifest2:
2810 2812 cnode = manifest2[cfname]
2811 2813 newfparent = fparent1
2812 2814
2813 2815 # Here, we used to search backwards through history to try to find
2814 2816 # where the file copy came from if the source of a copy was not in
2815 2817 # the parent directory. However, this doesn't actually make sense to
2816 2818 # do (what does a copy from something not in your working copy even
2817 2819 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2818 2820 # the user that copy information was dropped, so if they didn't
2819 2821 # expect this outcome it can be fixed, but this is the correct
2820 2822 # behavior in this circumstance.
2821 2823
2822 2824 if cnode:
2823 2825 self.ui.debug(
2824 2826 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2825 2827 )
2826 2828 if includecopymeta:
2827 2829 meta[b"copy"] = cfname
2828 2830 meta[b"copyrev"] = hex(cnode)
2829 2831 fparent1, fparent2 = nullid, newfparent
2830 2832 else:
2831 2833 self.ui.warn(
2832 2834 _(
2833 2835 b"warning: can't find ancestor for '%s' "
2834 2836 b"copied from '%s'!\n"
2835 2837 )
2836 2838 % (fname, cfname)
2837 2839 )
2838 2840
2839 2841 elif fparent1 == nullid:
2840 2842 fparent1, fparent2 = fparent2, nullid
2841 2843 elif fparent2 != nullid:
2842 2844 # is one parent an ancestor of the other?
2843 2845 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2844 2846 if fparent1 in fparentancestors:
2845 2847 fparent1, fparent2 = fparent2, nullid
2846 2848 elif fparent2 in fparentancestors:
2847 2849 fparent2 = nullid
2848 2850
2849 2851 # is the file changed?
2850 2852 text = fctx.data()
2851 2853 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2852 2854 changelist.append(fname)
2853 2855 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2854 2856 # are just the flags changed during merge?
2855 2857 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2856 2858 changelist.append(fname)
2857 2859
2858 2860 return fparent1
2859 2861
2860 2862 def checkcommitpatterns(self, wctx, match, status, fail):
2861 2863 """check for commit arguments that aren't committable"""
2862 2864 if match.isexact() or match.prefix():
2863 2865 matched = set(status.modified + status.added + status.removed)
2864 2866
2865 2867 for f in match.files():
2866 2868 f = self.dirstate.normalize(f)
2867 2869 if f == b'.' or f in matched or f in wctx.substate:
2868 2870 continue
2869 2871 if f in status.deleted:
2870 2872 fail(f, _(b'file not found!'))
2871 2873 # Is it a directory that exists or used to exist?
2872 2874 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2873 2875 d = f + b'/'
2874 2876 for mf in matched:
2875 2877 if mf.startswith(d):
2876 2878 break
2877 2879 else:
2878 2880 fail(f, _(b"no match under directory!"))
2879 2881 elif f not in self.dirstate:
2880 2882 fail(f, _(b"file not tracked!"))
2881 2883
2882 2884 @unfilteredmethod
2883 2885 def commit(
2884 2886 self,
2885 2887 text=b"",
2886 2888 user=None,
2887 2889 date=None,
2888 2890 match=None,
2889 2891 force=False,
2890 2892 editor=None,
2891 2893 extra=None,
2892 2894 ):
2893 2895 """Add a new revision to current repository.
2894 2896
2895 2897 Revision information is gathered from the working directory,
2896 2898 match can be used to filter the committed files. If editor is
2897 2899 supplied, it is called to get a commit message.
2898 2900 """
2899 2901 if extra is None:
2900 2902 extra = {}
2901 2903
2902 2904 def fail(f, msg):
2903 2905 raise error.Abort(b'%s: %s' % (f, msg))
2904 2906
2905 2907 if not match:
2906 2908 match = matchmod.always()
2907 2909
2908 2910 if not force:
2909 2911 match.bad = fail
2910 2912
2911 2913 # lock() for recent changelog (see issue4368)
2912 2914 with self.wlock(), self.lock():
2913 2915 wctx = self[None]
2914 2916 merge = len(wctx.parents()) > 1
2915 2917
2916 2918 if not force and merge and not match.always():
2917 2919 raise error.Abort(
2918 2920 _(
2919 2921 b'cannot partially commit a merge '
2920 2922 b'(do not specify files or patterns)'
2921 2923 )
2922 2924 )
2923 2925
2924 2926 status = self.status(match=match, clean=force)
2925 2927 if force:
2926 2928 status.modified.extend(
2927 2929 status.clean
2928 2930 ) # mq may commit clean files
2929 2931
2930 2932 # check subrepos
2931 2933 subs, commitsubs, newstate = subrepoutil.precommit(
2932 2934 self.ui, wctx, status, match, force=force
2933 2935 )
2934 2936
2935 2937 # make sure all explicit patterns are matched
2936 2938 if not force:
2937 2939 self.checkcommitpatterns(wctx, match, status, fail)
2938 2940
2939 2941 cctx = context.workingcommitctx(
2940 2942 self, status, text, user, date, extra
2941 2943 )
2942 2944
2943 2945 # internal config: ui.allowemptycommit
2944 2946 allowemptycommit = (
2945 2947 wctx.branch() != wctx.p1().branch()
2946 2948 or extra.get(b'close')
2947 2949 or merge
2948 2950 or cctx.files()
2949 2951 or self.ui.configbool(b'ui', b'allowemptycommit')
2950 2952 )
2951 2953 if not allowemptycommit:
2952 2954 return None
2953 2955
2954 2956 if merge and cctx.deleted():
2955 2957 raise error.Abort(_(b"cannot commit merge with missing files"))
2956 2958
2957 2959 ms = mergemod.mergestate.read(self)
2958 2960 mergeutil.checkunresolved(ms)
2959 2961
2960 2962 if editor:
2961 2963 cctx._text = editor(self, cctx, subs)
2962 2964 edited = text != cctx._text
2963 2965
2964 2966 # Save commit message in case this transaction gets rolled back
2965 2967 # (e.g. by a pretxncommit hook). Leave the content alone on
2966 2968 # the assumption that the user will use the same editor again.
2967 2969 msgfn = self.savecommitmessage(cctx._text)
2968 2970
2969 2971 # commit subs and write new state
2970 2972 if subs:
2971 2973 uipathfn = scmutil.getuipathfn(self)
2972 2974 for s in sorted(commitsubs):
2973 2975 sub = wctx.sub(s)
2974 2976 self.ui.status(
2975 2977 _(b'committing subrepository %s\n')
2976 2978 % uipathfn(subrepoutil.subrelpath(sub))
2977 2979 )
2978 2980 sr = sub.commit(cctx._text, user, date)
2979 2981 newstate[s] = (newstate[s][0], sr)
2980 2982 subrepoutil.writestate(self, newstate)
2981 2983
2982 2984 p1, p2 = self.dirstate.parents()
2983 2985 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2984 2986 try:
2985 2987 self.hook(
2986 2988 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2987 2989 )
2988 2990 with self.transaction(b'commit'):
2989 2991 ret = self.commitctx(cctx, True)
2990 2992 # update bookmarks, dirstate and mergestate
2991 2993 bookmarks.update(self, [p1, p2], ret)
2992 2994 cctx.markcommitted(ret)
2993 2995 ms.reset()
2994 2996 except: # re-raises
2995 2997 if edited:
2996 2998 self.ui.write(
2997 2999 _(b'note: commit message saved in %s\n') % msgfn
2998 3000 )
2999 3001 raise
3000 3002
3001 3003 def commithook(unused_success):
3002 3004 # hack for command that use a temporary commit (eg: histedit)
3003 3005 # temporary commit got stripped before hook release
3004 3006 if self.changelog.hasnode(ret):
3005 3007 self.hook(
3006 3008 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3007 3009 )
3008 3010
3009 3011 self._afterlock(commithook)
3010 3012 return ret
3011 3013
3012 3014 @unfilteredmethod
3013 3015 def commitctx(self, ctx, error=False, origctx=None):
3014 3016 """Add a new revision to current repository.
3015 3017 Revision information is passed via the context argument.
3016 3018
3017 3019 ctx.files() should list all files involved in this commit, i.e.
3018 3020 modified/added/removed files. On merge, it may be wider than the
3019 3021 ctx.files() to be committed, since any file nodes derived directly
3020 3022 from p1 or p2 are excluded from the committed ctx.files().
3021 3023
3022 3024 origctx is for convert to work around the problem that bug
3023 3025 fixes to the files list in changesets change hashes. For
3024 3026 convert to be the identity, it can pass an origctx and this
3025 3027 function will use the same files list when it makes sense to
3026 3028 do so.
3027 3029 """
3028 3030
3029 3031 p1, p2 = ctx.p1(), ctx.p2()
3030 3032 user = ctx.user()
3031 3033
3032 3034 if self.filecopiesmode == b'changeset-sidedata':
3033 3035 writechangesetcopy = True
3034 3036 writefilecopymeta = True
3035 3037 writecopiesto = None
3036 3038 else:
3037 3039 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3038 3040 writefilecopymeta = writecopiesto != b'changeset-only'
3039 3041 writechangesetcopy = writecopiesto in (
3040 3042 b'changeset-only',
3041 3043 b'compatibility',
3042 3044 )
3043 3045 p1copies, p2copies = None, None
3044 3046 if writechangesetcopy:
3045 3047 p1copies = ctx.p1copies()
3046 3048 p2copies = ctx.p2copies()
3047 3049 filesadded, filesremoved = None, None
3048 3050 with self.lock(), self.transaction(b"commit") as tr:
3049 3051 trp = weakref.proxy(tr)
3050 3052
3051 3053 if ctx.manifestnode():
3052 3054 # reuse an existing manifest revision
3053 3055 self.ui.debug(b'reusing known manifest\n')
3054 3056 mn = ctx.manifestnode()
3055 3057 files = ctx.files()
3056 3058 if writechangesetcopy:
3057 3059 filesadded = ctx.filesadded()
3058 3060 filesremoved = ctx.filesremoved()
3059 3061 elif ctx.files():
3060 3062 m1ctx = p1.manifestctx()
3061 3063 m2ctx = p2.manifestctx()
3062 3064 mctx = m1ctx.copy()
3063 3065
3064 3066 m = mctx.read()
3065 3067 m1 = m1ctx.read()
3066 3068 m2 = m2ctx.read()
3067 3069
3068 3070 # check in files
3069 3071 added = []
3070 3072 changed = []
3071 3073 removed = list(ctx.removed())
3072 3074 linkrev = len(self)
3073 3075 self.ui.note(_(b"committing files:\n"))
3074 3076 uipathfn = scmutil.getuipathfn(self)
3075 3077 for f in sorted(ctx.modified() + ctx.added()):
3076 3078 self.ui.note(uipathfn(f) + b"\n")
3077 3079 try:
3078 3080 fctx = ctx[f]
3079 3081 if fctx is None:
3080 3082 removed.append(f)
3081 3083 else:
3082 3084 added.append(f)
3083 3085 m[f] = self._filecommit(
3084 3086 fctx,
3085 3087 m1,
3086 3088 m2,
3087 3089 linkrev,
3088 3090 trp,
3089 3091 changed,
3090 3092 writefilecopymeta,
3091 3093 )
3092 3094 m.setflag(f, fctx.flags())
3093 3095 except OSError:
3094 3096 self.ui.warn(
3095 3097 _(b"trouble committing %s!\n") % uipathfn(f)
3096 3098 )
3097 3099 raise
3098 3100 except IOError as inst:
3099 3101 errcode = getattr(inst, 'errno', errno.ENOENT)
3100 3102 if error or errcode and errcode != errno.ENOENT:
3101 3103 self.ui.warn(
3102 3104 _(b"trouble committing %s!\n") % uipathfn(f)
3103 3105 )
3104 3106 raise
3105 3107
3106 3108 # update manifest
3107 3109 removed = [f for f in removed if f in m1 or f in m2]
3108 3110 drop = sorted([f for f in removed if f in m])
3109 3111 for f in drop:
3110 3112 del m[f]
3111 3113 if p2.rev() != nullrev:
3112 3114
3113 3115 @util.cachefunc
3114 3116 def mas():
3115 3117 p1n = p1.node()
3116 3118 p2n = p2.node()
3117 3119 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3118 3120 if not cahs:
3119 3121 cahs = [nullrev]
3120 3122 return [self[r].manifest() for r in cahs]
3121 3123
3122 3124 def deletionfromparent(f):
3123 3125 # When a file is removed relative to p1 in a merge, this
3124 3126 # function determines whether the absence is due to a
3125 3127 # deletion from a parent, or whether the merge commit
3126 3128 # itself deletes the file. We decide this by doing a
3127 3129 # simplified three way merge of the manifest entry for
3128 3130 # the file. There are two ways we decide the merge
3129 3131 # itself didn't delete a file:
3130 3132 # - neither parent (nor the merge) contain the file
3131 3133 # - exactly one parent contains the file, and that
3132 3134 # parent has the same filelog entry as the merge
3133 3135 # ancestor (or all of them if there are two). In other
3134 3136 # words, that parent left the file unchanged while the
3135 3137 # other one deleted it.
3136 3138 # One way to think about this is that deleting a file is
3137 3139 # similar to emptying it, so the list of changed files
3138 3140 # should be similar either way. The computation
3139 3141 # described above is not done directly in _filecommit
3140 3142 # when creating the list of changed files, however
3141 3143 # it does something very similar by comparing filelog
3142 3144 # nodes.
3143 3145 if f in m1:
3144 3146 return f not in m2 and all(
3145 3147 f in ma and ma.find(f) == m1.find(f)
3146 3148 for ma in mas()
3147 3149 )
3148 3150 elif f in m2:
3149 3151 return all(
3150 3152 f in ma and ma.find(f) == m2.find(f)
3151 3153 for ma in mas()
3152 3154 )
3153 3155 else:
3154 3156 return True
3155 3157
3156 3158 removed = [f for f in removed if not deletionfromparent(f)]
3157 3159
3158 3160 files = changed + removed
3159 3161 md = None
3160 3162 if not files:
3161 3163 # if no "files" actually changed in terms of the changelog,
3162 3164 # try hard to detect unmodified manifest entry so that the
3163 3165 # exact same commit can be reproduced later on convert.
3164 3166 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3165 3167 if not files and md:
3166 3168 self.ui.debug(
3167 3169 b'not reusing manifest (no file change in '
3168 3170 b'changelog, but manifest differs)\n'
3169 3171 )
3170 3172 if files or md:
3171 3173 self.ui.note(_(b"committing manifest\n"))
3172 3174 # we're using narrowmatch here since it's already applied at
3173 3175 # other stages (such as dirstate.walk), so we're already
3174 3176 # ignoring things outside of narrowspec in most cases. The
3175 3177 # one case where we might have files outside the narrowspec
3176 3178 # at this point is merges, and we already error out in the
3177 3179 # case where the merge has files outside of the narrowspec,
3178 3180 # so this is safe.
3179 3181 mn = mctx.write(
3180 3182 trp,
3181 3183 linkrev,
3182 3184 p1.manifestnode(),
3183 3185 p2.manifestnode(),
3184 3186 added,
3185 3187 drop,
3186 3188 match=self.narrowmatch(),
3187 3189 )
3188 3190
3189 3191 if writechangesetcopy:
3190 3192 filesadded = [
3191 3193 f for f in changed if not (f in m1 or f in m2)
3192 3194 ]
3193 3195 filesremoved = removed
3194 3196 else:
3195 3197 self.ui.debug(
3196 3198 b'reusing manifest from p1 (listed files '
3197 3199 b'actually unchanged)\n'
3198 3200 )
3199 3201 mn = p1.manifestnode()
3200 3202 else:
3201 3203 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3202 3204 mn = p1.manifestnode()
3203 3205 files = []
3204 3206
3205 3207 if writecopiesto == b'changeset-only':
3206 3208 # If writing only to changeset extras, use None to indicate that
3207 3209 # no entry should be written. If writing to both, write an empty
3208 3210 # entry to prevent the reader from falling back to reading
3209 3211 # filelogs.
3210 3212 p1copies = p1copies or None
3211 3213 p2copies = p2copies or None
3212 3214 filesadded = filesadded or None
3213 3215 filesremoved = filesremoved or None
3214 3216
3215 3217 if origctx and origctx.manifestnode() == mn:
3216 3218 files = origctx.files()
3217 3219
3218 3220 # update changelog
3219 3221 self.ui.note(_(b"committing changelog\n"))
3220 3222 self.changelog.delayupdate(tr)
3221 3223 n = self.changelog.add(
3222 3224 mn,
3223 3225 files,
3224 3226 ctx.description(),
3225 3227 trp,
3226 3228 p1.node(),
3227 3229 p2.node(),
3228 3230 user,
3229 3231 ctx.date(),
3230 3232 ctx.extra().copy(),
3231 3233 p1copies,
3232 3234 p2copies,
3233 3235 filesadded,
3234 3236 filesremoved,
3235 3237 )
3236 3238 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3237 3239 self.hook(
3238 3240 b'pretxncommit',
3239 3241 throw=True,
3240 3242 node=hex(n),
3241 3243 parent1=xp1,
3242 3244 parent2=xp2,
3243 3245 )
3244 3246 # set the new commit in its proper phase
3245 3247 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3246 3248 if targetphase:
3247 3249 # retract boundary does not alter parent changesets.
3248 3250 # if a parent has a higher phase, the resulting phase will
3249 3251 # be compliant anyway
3250 3252 #
3251 3253 # if minimal phase was 0 we don't need to retract anything
3252 3254 phases.registernew(self, tr, targetphase, [n])
3253 3255 return n
3254 3256
3255 3257 @unfilteredmethod
3256 3258 def destroying(self):
3257 3259 '''Inform the repository that nodes are about to be destroyed.
3258 3260 Intended for use by strip and rollback, so there's a common
3259 3261 place for anything that has to be done before destroying history.
3260 3262
3261 3263 This is mostly useful for saving state that is in memory and waiting
3262 3264 to be flushed when the current lock is released. Because a call to
3263 3265 destroyed is imminent, the repo will be invalidated causing those
3264 3266 changes to stay in memory (waiting for the next unlock), or vanish
3265 3267 completely.
3266 3268 '''
3267 3269 # When using the same lock to commit and strip, the phasecache is left
3268 3270 # dirty after committing. Then when we strip, the repo is invalidated,
3269 3271 # causing those changes to disappear.
3270 3272 if '_phasecache' in vars(self):
3271 3273 self._phasecache.write()
3272 3274
3273 3275 @unfilteredmethod
3274 3276 def destroyed(self):
3275 3277 '''Inform the repository that nodes have been destroyed.
3276 3278 Intended for use by strip and rollback, so there's a common
3277 3279 place for anything that has to be done after destroying history.
3278 3280 '''
3279 3281 # When one tries to:
3280 3282 # 1) destroy nodes thus calling this method (e.g. strip)
3281 3283 # 2) use phasecache somewhere (e.g. commit)
3282 3284 #
3283 3285 # then 2) will fail because the phasecache contains nodes that were
3284 3286 # removed. We can either remove phasecache from the filecache,
3285 3287 # causing it to reload next time it is accessed, or simply filter
3286 3288 # the removed nodes now and write the updated cache.
3287 3289 self._phasecache.filterunknown(self)
3288 3290 self._phasecache.write()
3289 3291
3290 3292 # refresh all repository caches
3291 3293 self.updatecaches()
3292 3294
3293 3295 # Ensure the persistent tag cache is updated. Doing it now
3294 3296 # means that the tag cache only has to worry about destroyed
3295 3297 # heads immediately after a strip/rollback. That in turn
3296 3298 # guarantees that "cachetip == currenttip" (comparing both rev
3297 3299 # and node) always means no nodes have been added or destroyed.
3298 3300
3299 3301 # XXX this is suboptimal when qrefresh'ing: we strip the current
3300 3302 # head, refresh the tag cache, then immediately add a new head.
3301 3303 # But I think doing it this way is necessary for the "instant
3302 3304 # tag cache retrieval" case to work.
3303 3305 self.invalidate()
3304 3306
3305 3307 def status(
3306 3308 self,
3307 3309 node1=b'.',
3308 3310 node2=None,
3309 3311 match=None,
3310 3312 ignored=False,
3311 3313 clean=False,
3312 3314 unknown=False,
3313 3315 listsubrepos=False,
3314 3316 ):
3315 3317 '''a convenience method that calls node1.status(node2)'''
3316 3318 return self[node1].status(
3317 3319 node2, match, ignored, clean, unknown, listsubrepos
3318 3320 )
3319 3321
3320 3322 def addpostdsstatus(self, ps):
3321 3323 """Add a callback to run within the wlock, at the point at which status
3322 3324 fixups happen.
3323 3325
3324 3326 On status completion, callback(wctx, status) will be called with the
3325 3327 wlock held, unless the dirstate has changed from underneath or the wlock
3326 3328 couldn't be grabbed.
3327 3329
3328 3330 Callbacks should not capture and use a cached copy of the dirstate --
3329 3331 it might change in the meanwhile. Instead, they should access the
3330 3332 dirstate via wctx.repo().dirstate.
3331 3333
3332 3334 This list is emptied out after each status run -- extensions should
3333 3335 make sure it adds to this list each time dirstate.status is called.
3334 3336 Extensions should also make sure they don't call this for statuses
3335 3337 that don't involve the dirstate.
3336 3338 """
3337 3339
3338 3340 # The list is located here for uniqueness reasons -- it is actually
3339 3341 # managed by the workingctx, but that isn't unique per-repo.
3340 3342 self._postdsstatus.append(ps)
3341 3343
3342 3344 def postdsstatus(self):
3343 3345 """Used by workingctx to get the list of post-dirstate-status hooks."""
3344 3346 return self._postdsstatus
3345 3347
3346 3348 def clearpostdsstatus(self):
3347 3349 """Used by workingctx to clear post-dirstate-status hooks."""
3348 3350 del self._postdsstatus[:]
3349 3351
3350 3352 def heads(self, start=None):
3351 3353 if start is None:
3352 3354 cl = self.changelog
3353 3355 headrevs = reversed(cl.headrevs())
3354 3356 return [cl.node(rev) for rev in headrevs]
3355 3357
3356 3358 heads = self.changelog.heads(start)
3357 3359 # sort the output in rev descending order
3358 3360 return sorted(heads, key=self.changelog.rev, reverse=True)
3359 3361
3360 3362 def branchheads(self, branch=None, start=None, closed=False):
3361 3363 '''return a (possibly filtered) list of heads for the given branch
3362 3364
3363 3365 Heads are returned in topological order, from newest to oldest.
3364 3366 If branch is None, use the dirstate branch.
3365 3367 If start is not None, return only heads reachable from start.
3366 3368 If closed is True, return heads that are marked as closed as well.
3367 3369 '''
3368 3370 if branch is None:
3369 3371 branch = self[None].branch()
3370 3372 branches = self.branchmap()
3371 3373 if not branches.hasbranch(branch):
3372 3374 return []
3373 3375 # the cache returns heads ordered lowest to highest
3374 3376 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3375 3377 if start is not None:
3376 3378 # filter out the heads that cannot be reached from startrev
3377 3379 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3378 3380 bheads = [h for h in bheads if h in fbheads]
3379 3381 return bheads
3380 3382
3381 3383 def branches(self, nodes):
3382 3384 if not nodes:
3383 3385 nodes = [self.changelog.tip()]
3384 3386 b = []
3385 3387 for n in nodes:
3386 3388 t = n
3387 3389 while True:
3388 3390 p = self.changelog.parents(n)
3389 3391 if p[1] != nullid or p[0] == nullid:
3390 3392 b.append((t, n, p[0], p[1]))
3391 3393 break
3392 3394 n = p[0]
3393 3395 return b
3394 3396
3395 3397 def between(self, pairs):
3396 3398 r = []
3397 3399
3398 3400 for top, bottom in pairs:
3399 3401 n, l, i = top, [], 0
3400 3402 f = 1
3401 3403
3402 3404 while n != bottom and n != nullid:
3403 3405 p = self.changelog.parents(n)[0]
3404 3406 if i == f:
3405 3407 l.append(n)
3406 3408 f = f * 2
3407 3409 n = p
3408 3410 i += 1
3409 3411
3410 3412 r.append(l)
3411 3413
3412 3414 return r
3413 3415
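# Editor's note: a worked example of the sampling above, not part of the
# original source. Assuming a purely linear history a9 -> a8 -> ... -> a0
# (following first parents), between([(a9, a0)]) collects the ancestors of a9
# at first-parent distances 1, 2, 4 and 8, i.e. it returns [[a8, a7, a5, a1]].
# Callers use this doubling step to probe history cheaply.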
3414 3416 def checkpush(self, pushop):
3415 3417 """Extensions can override this function if additional checks have
3416 3418 to be performed before pushing, or call it if they override push
3417 3419 command.
3418 3420 """
3419 3421
3420 3422 @unfilteredpropertycache
3421 3423 def prepushoutgoinghooks(self):
3422 3424 """Return util.hooks consists of a pushop with repo, remote, outgoing
3423 3425 methods, which are called before pushing changesets.
3424 3426 """
3425 3427 return util.hooks()
3426 3428
3427 3429 def pushkey(self, namespace, key, old, new):
3428 3430 try:
3429 3431 tr = self.currenttransaction()
3430 3432 hookargs = {}
3431 3433 if tr is not None:
3432 3434 hookargs.update(tr.hookargs)
3433 3435 hookargs = pycompat.strkwargs(hookargs)
3434 3436 hookargs['namespace'] = namespace
3435 3437 hookargs['key'] = key
3436 3438 hookargs['old'] = old
3437 3439 hookargs['new'] = new
3438 3440 self.hook(b'prepushkey', throw=True, **hookargs)
3439 3441 except error.HookAbort as exc:
3440 3442 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3441 3443 if exc.hint:
3442 3444 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3443 3445 return False
3444 3446 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3445 3447 ret = pushkey.push(self, namespace, key, old, new)
3446 3448
3447 3449 def runhook(unused_success):
3448 3450 self.hook(
3449 3451 b'pushkey',
3450 3452 namespace=namespace,
3451 3453 key=key,
3452 3454 old=old,
3453 3455 new=new,
3454 3456 ret=ret,
3455 3457 )
3456 3458
3457 3459 self._afterlock(runhook)
3458 3460 return ret
3459 3461
3460 3462 def listkeys(self, namespace):
3461 3463 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3462 3464 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3463 3465 values = pushkey.list(self, namespace)
3464 3466 self.hook(b'listkeys', namespace=namespace, values=values)
3465 3467 return values
3466 3468
3467 3469 def debugwireargs(self, one, two, three=None, four=None, five=None):
3468 3470 '''used to test argument passing over the wire'''
3469 3471 return b"%s %s %s %s %s" % (
3470 3472 one,
3471 3473 two,
3472 3474 pycompat.bytestr(three),
3473 3475 pycompat.bytestr(four),
3474 3476 pycompat.bytestr(five),
3475 3477 )
3476 3478
3477 3479 def savecommitmessage(self, text):
3478 3480 fp = self.vfs(b'last-message.txt', b'wb')
3479 3481 try:
3480 3482 fp.write(text)
3481 3483 finally:
3482 3484 fp.close()
3483 3485 return self.pathto(fp.name[len(self.root) + 1 :])
3484 3486
3485 3487
3486 3488 # used to avoid circular references so destructors work
3487 3489 def aftertrans(files):
3488 3490 renamefiles = [tuple(t) for t in files]
3489 3491
3490 3492 def a():
3491 3493 for vfs, src, dest in renamefiles:
3492 3494 # if src and dest refer to the same file, vfs.rename is a no-op,
3493 3495 # leaving both src and dest on disk. delete dest to make sure
3494 3496 # the rename can't be such a no-op.
3495 3497 vfs.tryunlink(dest)
3496 3498 try:
3497 3499 vfs.rename(src, dest)
3498 3500 except OSError: # journal file does not yet exist
3499 3501 pass
3500 3502
3501 3503 return a
3502 3504
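# Editor's sketch (hypothetical, not part of this changeset): the callback
# returned by aftertrans() simply performs the recorded renames. With a toy
# vfs-like object, and assuming aftertrans() from this module is in scope:
#
#     class fakevfs(object):
#         def __init__(self):
#             self.files = {b'journal': b'data'}
#         def tryunlink(self, name):
#             self.files.pop(name, None)
#         def rename(self, src, dest):
#             self.files[dest] = self.files.pop(src)
#
#     vfs = fakevfs()
#     aftertrans([(vfs, b'journal', b'undo')])()
#     assert list(vfs.files) == [b'undo']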
3503 3505
3504 3506 def undoname(fn):
3505 3507 base, name = os.path.split(fn)
3506 3508 assert name.startswith(b'journal')
3507 3509 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3508 3510
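# Editor's note (illustrative path, not from the source): undoname() maps a
# journal file to its undo counterpart, e.g.
#     undoname(b'/repo/.hg/store/journal.phaseroots')
#     -> b'/repo/.hg/store/undo.phaseroots'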
3509 3511
3510 3512 def instance(ui, path, create, intents=None, createopts=None):
3511 3513 localpath = util.urllocalpath(path)
3512 3514 if create:
3513 3515 createrepository(ui, localpath, createopts=createopts)
3514 3516
3515 3517 return makelocalrepository(ui, localpath, intents=intents)
3516 3518
3517 3519
3518 3520 def islocal(path):
3519 3521 return True
3520 3522
3521 3523
3522 3524 def defaultcreateopts(ui, createopts=None):
3523 3525 """Populate the default creation options for a repository.
3524 3526
3525 3527 A dictionary of explicitly requested creation options can be passed
3526 3528 in. Missing keys will be populated.
3527 3529 """
3528 3530 createopts = dict(createopts or {})
3529 3531
3530 3532 if b'backend' not in createopts:
3531 3533 # experimental config: storage.new-repo-backend
3532 3534 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3533 3535
3534 3536 return createopts
3535 3537
3536 3538
3537 3539 def newreporequirements(ui, createopts):
3538 3540 """Determine the set of requirements for a new local repository.
3539 3541
3540 3542 Extensions can wrap this function to specify custom requirements for
3541 3543 new repositories.
3542 3544 """
3543 3545 # If the repo is being created from a shared repository, we copy
3544 3546 # its requirements.
3545 3547 if b'sharedrepo' in createopts:
3546 3548 requirements = set(createopts[b'sharedrepo'].requirements)
3547 3549 if createopts.get(b'sharedrelative'):
3548 3550 requirements.add(b'relshared')
3549 3551 else:
3550 3552 requirements.add(b'shared')
3551 3553
3552 3554 return requirements
3553 3555
3554 3556 if b'backend' not in createopts:
3555 3557 raise error.ProgrammingError(
3556 3558 b'backend key not present in createopts; '
3557 3559 b'was defaultcreateopts() called?'
3558 3560 )
3559 3561
3560 3562 if createopts[b'backend'] != b'revlogv1':
3561 3563 raise error.Abort(
3562 3564 _(
3563 3565 b'unable to determine repository requirements for '
3564 3566 b'storage backend: %s'
3565 3567 )
3566 3568 % createopts[b'backend']
3567 3569 )
3568 3570
3569 3571 requirements = {b'revlogv1'}
3570 3572 if ui.configbool(b'format', b'usestore'):
3571 3573 requirements.add(b'store')
3572 3574 if ui.configbool(b'format', b'usefncache'):
3573 3575 requirements.add(b'fncache')
3574 3576 if ui.configbool(b'format', b'dotencode'):
3575 3577 requirements.add(b'dotencode')
3576 3578
3577 3579 compengine = ui.config(b'format', b'revlog-compression')
3578 3580 if compengine not in util.compengines:
3579 3581 raise error.Abort(
3580 3582 _(
3581 3583 b'compression engine %s defined by '
3582 3584 b'format.revlog-compression not available'
3583 3585 )
3584 3586 % compengine,
3585 3587 hint=_(
3586 3588 b'run "hg debuginstall" to list available '
3587 3589 b'compression engines'
3588 3590 ),
3589 3591 )
3590 3592
3591 3593 # zlib is the historical default and doesn't need an explicit requirement.
3592 3594 elif compengine == b'zstd':
3593 3595 requirements.add(b'revlog-compression-zstd')
3594 3596 elif compengine != b'zlib':
3595 3597 requirements.add(b'exp-compression-%s' % compengine)
3596 3598
3597 3599 if scmutil.gdinitconfig(ui):
3598 3600 requirements.add(b'generaldelta')
3599 3601 if ui.configbool(b'format', b'sparse-revlog'):
3600 3602 requirements.add(SPARSEREVLOG_REQUIREMENT)
3601 3603
3602 3604 # experimental config: format.exp-use-side-data
3603 3605 if ui.configbool(b'format', b'exp-use-side-data'):
3604 3606 requirements.add(SIDEDATA_REQUIREMENT)
3605 3607 # experimental config: format.exp-use-copies-side-data-changeset
3606 3608 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3607 3609 requirements.add(SIDEDATA_REQUIREMENT)
3608 3610 requirements.add(COPIESSDC_REQUIREMENT)
3609 3611 if ui.configbool(b'experimental', b'treemanifest'):
3610 3612 requirements.add(b'treemanifest')
3611 3613
3612 3614 revlogv2 = ui.config(b'experimental', b'revlogv2')
3613 3615 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3614 3616 requirements.remove(b'revlogv1')
3615 3617 # generaldelta is implied by revlogv2.
3616 3618 requirements.discard(b'generaldelta')
3617 3619 requirements.add(REVLOGV2_REQUIREMENT)
3618 3620 # experimental config: format.internal-phase
3619 3621 if ui.configbool(b'format', b'internal-phase'):
3620 3622 requirements.add(b'internal-phase')
3621 3623
3622 3624 if createopts.get(b'narrowfiles'):
3623 3625 requirements.add(repository.NARROW_REQUIREMENT)
3624 3626
3625 3627 if createopts.get(b'lfs'):
3626 3628 requirements.add(b'lfs')
3627 3629
3628 3630 if ui.configbool(b'format', b'bookmarks-in-store'):
3629 3631 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3630 3632
3631 3633 return requirements
3632 3634
3633 3635
3634 3636 def filterknowncreateopts(ui, createopts):
3635 3637 """Filters a dict of repo creation options against options that are known.
3636 3638
3637 3639 Receives a dict of repo creation options and returns a dict of those
3638 3640 options that we don't know how to handle.
3639 3641
3640 3642 This function is called as part of repository creation. If the
3641 3643 returned dict contains any items, repository creation will not
3642 3644 be allowed, as it means there was a request to create a repository
3643 3645 with options not recognized by loaded code.
3644 3646
3645 3647 Extensions can wrap this function to filter out creation options
3646 3648 they know how to handle.
3647 3649 """
3648 3650 known = {
3649 3651 b'backend',
3650 3652 b'lfs',
3651 3653 b'narrowfiles',
3652 3654 b'sharedrepo',
3653 3655 b'sharedrelative',
3654 3656 b'shareditems',
3655 3657 b'shallowfilestore',
3656 3658 }
3657 3659
3658 3660 return {k: v for k, v in createopts.items() if k not in known}
3659 3661
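# Editor's sketch (hypothetical option names): anything outside the known set
# is returned unchanged, which createrepository() below turns into an abort.
#     filterknowncreateopts(ui, {b'lfs': True, b'frobnicate': 1})
#     -> {b'frobnicate': 1}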
3660 3662
3661 3663 def createrepository(ui, path, createopts=None):
3662 3664 """Create a new repository in a vfs.
3663 3665
3664 3666 ``path`` path to the new repo's working directory.
3665 3667 ``createopts`` options for the new repository.
3666 3668
3667 3669 The following keys for ``createopts`` are recognized:
3668 3670
3669 3671 backend
3670 3672 The storage backend to use.
3671 3673 lfs
3672 3674 Repository will be created with ``lfs`` requirement. The lfs extension
3673 3675 will automatically be loaded when the repository is accessed.
3674 3676 narrowfiles
3675 3677 Set up repository to support narrow file storage.
3676 3678 sharedrepo
3677 3679 Repository object from which storage should be shared.
3678 3680 sharedrelative
3679 3681 Boolean indicating if the path to the shared repo should be
3680 3682 stored as relative. By default, the pointer to the "parent" repo
3681 3683 is stored as an absolute path.
3682 3684 shareditems
3683 3685 Set of items to share to the new repository (in addition to storage).
3684 3686 shallowfilestore
3685 3687 Indicates that storage for files should be shallow (not all ancestor
3686 3688 revisions are known).
3687 3689 """
3688 3690 createopts = defaultcreateopts(ui, createopts=createopts)
3689 3691
3690 3692 unknownopts = filterknowncreateopts(ui, createopts)
3691 3693
3692 3694 if not isinstance(unknownopts, dict):
3693 3695 raise error.ProgrammingError(
3694 3696 b'filterknowncreateopts() did not return a dict'
3695 3697 )
3696 3698
3697 3699 if unknownopts:
3698 3700 raise error.Abort(
3699 3701 _(
3700 3702 b'unable to create repository because of unknown '
3701 3703 b'creation option: %s'
3702 3704 )
3703 3705 % b', '.join(sorted(unknownopts)),
3704 3706 hint=_(b'is a required extension not loaded?'),
3705 3707 )
3706 3708
3707 3709 requirements = newreporequirements(ui, createopts=createopts)
3708 3710
3709 3711 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3710 3712
3711 3713 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3712 3714 if hgvfs.exists():
3713 3715 raise error.RepoError(_(b'repository %s already exists') % path)
3714 3716
3715 3717 if b'sharedrepo' in createopts:
3716 3718 sharedpath = createopts[b'sharedrepo'].sharedpath
3717 3719
3718 3720 if createopts.get(b'sharedrelative'):
3719 3721 try:
3720 3722 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3721 3723 except (IOError, ValueError) as e:
3722 3724 # ValueError is raised on Windows if the drive letters differ
3723 3725 # on each path.
3724 3726 raise error.Abort(
3725 3727 _(b'cannot calculate relative path'),
3726 3728 hint=stringutil.forcebytestr(e),
3727 3729 )
3728 3730
3729 3731 if not wdirvfs.exists():
3730 3732 wdirvfs.makedirs()
3731 3733
3732 3734 hgvfs.makedir(notindexed=True)
3733 3735 if b'sharedrepo' not in createopts:
3734 3736 hgvfs.mkdir(b'cache')
3735 3737 hgvfs.mkdir(b'wcache')
3736 3738
3737 3739 if b'store' in requirements and b'sharedrepo' not in createopts:
3738 3740 hgvfs.mkdir(b'store')
3739 3741
3740 3742 # We create an invalid changelog outside the store so very old
3741 3743 # Mercurial versions (which didn't know about the requirements
3742 3744 # file) encounter an error on reading the changelog. This
3743 3745 # effectively locks out old clients and prevents them from
3744 3746 # mucking with a repo in an unknown format.
3745 3747 #
3746 3748 # The revlog header has version 2, which won't be recognized by
3747 3749 # such old clients.
3748 3750 hgvfs.append(
3749 3751 b'00changelog.i',
3750 3752 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3751 3753 b'layout',
3752 3754 )
3753 3755
3754 3756 scmutil.writerequires(hgvfs, requirements)
3755 3757
3756 3758 # Write out file telling readers where to find the shared store.
3757 3759 if b'sharedrepo' in createopts:
3758 3760 hgvfs.write(b'sharedpath', sharedpath)
3759 3761
3760 3762 if createopts.get(b'shareditems'):
3761 3763 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3762 3764 hgvfs.write(b'shared', shared)
3763 3765
3764 3766
3765 3767 def poisonrepository(repo):
3766 3768 """Poison a repository instance so it can no longer be used."""
3767 3769 # Perform any cleanup on the instance.
3768 3770 repo.close()
3769 3771
3770 3772 # Our strategy is to replace the type of the object with one that
3771 3773 # has all attribute lookups result in error.
3772 3774 #
3773 3775 # But we have to allow the close() method because some constructors
3774 3776 # of repos call close() on repo references.
3775 3777 class poisonedrepository(object):
3776 3778 def __getattribute__(self, item):
3777 3779 if item == 'close':
3778 3780 return object.__getattribute__(self, item)
3779 3781
3780 3782 raise error.ProgrammingError(
3781 3783 b'repo instances should not be used after unshare'
3782 3784 )
3783 3785
3784 3786 def close(self):
3785 3787 pass
3786 3788
3787 3789 # We may have a repoview, which intercepts __setattr__. So be sure
3788 3790 # we operate at the lowest level possible.
3789 3791 object.__setattr__(repo, '__class__', poisonedrepository)
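
# Editor's sketch of the resulting behavior (hypothetical repo object):
#     poisonrepository(repo)
#     repo.close()      # still permitted, close() is whitelisted above
#     repo.changelog    # raises error.ProgrammingError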
@@ -1,236 +1,251 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import nullid, nullrev
14 14 from .. import (
15 15 pycompat,
16 16 util,
17 17 )
18 18
19 19 from ..revlogutils import nodemap as nodemaputil
20 20
21 21 stringio = pycompat.bytesio
22 22
23 23
24 24 _pack = struct.pack
25 25 _unpack = struct.unpack
26 26 _compress = zlib.compress
27 27 _decompress = zlib.decompress
28 28
29 29 # Some code below makes tuples directly because it's more convenient. However,
30 30 # code outside this module should always use dirstatetuple.
31 31 def dirstatetuple(*x):
32 32 # x is a tuple
33 33 return x
34 34
35 35
36 36 indexformatng = b">Qiiiiii20s12x"
37 37 indexfirst = struct.calcsize(b'Q')
38 38 sizeint = struct.calcsize(b'i')
39 39 indexsize = struct.calcsize(indexformatng)
40 40
41 41
42 42 def gettype(q):
43 43 return int(q & 0xFFFF)
44 44
45 45
46 46 def offset_type(offset, type):
47 47 return int(int(offset) << 16 | type)
48 48
49 49
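# Editor's note: a quick illustration of the packing used by offset_type()
# above (and unpacked by gettype() here and getoffset() in revlog.py); the
# values are arbitrary.
#     packed = offset_type(1234, 0x1)
#     assert packed == (1234 << 16) | 0x1
#     assert gettype(packed) == 0x1
#     assert packed >> 16 == 1234
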
50 50 class BaseIndexObject(object):
51 51 @property
52 52 def nodemap(self):
53 53 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
54 54 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
55 55 return self._nodemap
56 56
57 57 @util.propertycache
58 58 def _nodemap(self):
59 59 nodemap = nodemaputil.NodeMap({nullid: nullrev})
60 60 for r in range(0, len(self)):
61 61 n = self[r][7]
62 62 nodemap[n] = r
63 63 return nodemap
64 64
65 65 def has_node(self, node):
66 66 """return True if the node exist in the index"""
67 67 return node in self._nodemap
68 68
69 69 def rev(self, node):
70 70 """return a revision for a node
71 71
72 72 If the node is unknown, raise a RevlogError"""
73 73 return self._nodemap[node]
74 74
75 75 def get_rev(self, node):
76 76 """return a revision for a node
77 77
78 78 If the node is unknown, return None"""
79 79 return self._nodemap.get(node)
80 80
81 81 def _stripnodes(self, start):
82 82 if '_nodemap' in vars(self):
83 83 for r in range(start, len(self)):
84 84 n = self[r][7]
85 85 del self._nodemap[n]
86 86
87 87 def clearcaches(self):
88 88 self.__dict__.pop('_nodemap', None)
89 89
90 90 def __len__(self):
91 91 return self._lgt + len(self._extra)
92 92
93 93 def append(self, tup):
94 94 if '_nodemap' in vars(self):
95 95 self._nodemap[tup[7]] = len(self)
96 96 self._extra.append(tup)
97 97
98 98 def _check_index(self, i):
99 99 if not isinstance(i, int):
100 100 raise TypeError(b"expecting int indexes")
101 101 if i < 0 or i >= len(self):
102 102 raise IndexError
103 103
104 104 def __getitem__(self, i):
105 105 if i == -1:
106 106 return (0, 0, 0, -1, -1, -1, -1, nullid)
107 107 self._check_index(i)
108 108 if i >= self._lgt:
109 109 return self._extra[i - self._lgt]
110 110 index = self._calculate_index(i)
111 111 r = struct.unpack(indexformatng, self._data[index : index + indexsize])
112 112 if i == 0:
113 113 e = list(r)
114 114 type = gettype(e[0])
115 115 e[0] = offset_type(0, type)
116 116 return tuple(e)
117 117 return r
118 118
119 119
120 120 class IndexObject(BaseIndexObject):
121 121 def __init__(self, data):
122 122 assert len(data) % indexsize == 0
123 123 self._data = data
124 124 self._lgt = len(data) // indexsize
125 125 self._extra = []
126 126
127 127 def _calculate_index(self, i):
128 128 return i * indexsize
129 129
130 130 def __delitem__(self, i):
131 131 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
132 132 raise ValueError(b"deleting slices only supports a:-1 with step 1")
133 133 i = i.start
134 134 self._check_index(i)
135 135 self._stripnodes(i)
136 136 if i < self._lgt:
137 137 self._data = self._data[: i * indexsize]
138 138 self._lgt = i
139 139 self._extra = []
140 140 else:
141 141 self._extra = self._extra[: i - self._lgt]
142 142
143 143
144 class PersistentNodeMapIndexObject(IndexObject):
145 """a Debug oriented class to test persistent nodemap
146
147 We need a simple python object to test the API and higher-level behavior. See
148 the Rust implementation for more serious usage. This should be used only
149 through the dedicated `devel.persistent-nodemap` config.
150 """
151
152
144 153 class InlinedIndexObject(BaseIndexObject):
145 154 def __init__(self, data, inline=0):
146 155 self._data = data
147 156 self._lgt = self._inline_scan(None)
148 157 self._inline_scan(self._lgt)
149 158 self._extra = []
150 159
151 160 def _inline_scan(self, lgt):
152 161 off = 0
153 162 if lgt is not None:
154 163 self._offsets = [0] * lgt
155 164 count = 0
156 165 while off <= len(self._data) - indexsize:
157 166 (s,) = struct.unpack(
158 167 b'>i', self._data[off + indexfirst : off + sizeint + indexfirst]
159 168 )
160 169 if lgt is not None:
161 170 self._offsets[count] = off
162 171 count += 1
163 172 off += indexsize + s
164 173 if off != len(self._data):
165 174 raise ValueError(b"corrupted data")
166 175 return count
167 176
168 177 def __delitem__(self, i):
169 178 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
170 179 raise ValueError(b"deleting slices only supports a:-1 with step 1")
171 180 i = i.start
172 181 self._check_index(i)
173 182 self._stripnodes(i)
174 183 if i < self._lgt:
175 184 self._offsets = self._offsets[:i]
176 185 self._lgt = i
177 186 self._extra = []
178 187 else:
179 188 self._extra = self._extra[: i - self._lgt]
180 189
181 190 def _calculate_index(self, i):
182 191 return self._offsets[i]
183 192
184 193
185 194 def parse_index2(data, inline):
186 195 if not inline:
187 196 return IndexObject(data), None
188 197 return InlinedIndexObject(data, inline), (0, data)
189 198
190 199
200 def parse_index_devel_nodemap(data, inline):
201 """like parse_index2, but alway return a PersistentNodeMapIndexObject
202 """
203 return PersistentNodeMapIndexObject(data), None
204
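# Editor's sketch (illustrative call only): unlike parse_index2(), this
# always wraps the raw, non-inline index data in the debug-oriented
# PersistentNodeMapIndexObject defined above.
#     index, cache = parse_index_devel_nodemap(data, inline=False)
#     assert isinstance(index, PersistentNodeMapIndexObject)
#     assert cache is None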
205
191 206 def parse_dirstate(dmap, copymap, st):
192 207 parents = [st[:20], st[20:40]]
193 208 # dereference fields so they will be local in loop
194 209 format = b">cllll"
195 210 e_size = struct.calcsize(format)
196 211 pos1 = 40
197 212 l = len(st)
198 213
199 214 # the inner loop
200 215 while pos1 < l:
201 216 pos2 = pos1 + e_size
202 217 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
203 218 pos1 = pos2 + e[4]
204 219 f = st[pos2:pos1]
205 220 if b'\0' in f:
206 221 f, c = f.split(b'\0')
207 222 copymap[f] = c
208 223 dmap[f] = e[:4]
209 224 return parents
210 225
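# Editor's note: an illustrative round-trip of one on-disk dirstate entry in
# the ">cllll" layout read above (state, mode, size, mtime, filename length),
# using made-up values; the filename bytes follow the fixed-size header.
#     import struct
#     e = struct.pack(b">cllll", b'n', 0o644, 12, 0, len(b'foo'))
#     assert struct.unpack(b">cllll", e) == (b'n', 0o644, 12, 0, 3)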
211 226
212 227 def pack_dirstate(dmap, copymap, pl, now):
213 228 now = int(now)
214 229 cs = stringio()
215 230 write = cs.write
216 231 write(b"".join(pl))
217 232 for f, e in pycompat.iteritems(dmap):
218 233 if e[0] == b'n' and e[3] == now:
219 234 # The file was last modified "simultaneously" with the current
220 235 # write to dirstate (i.e. within the same second for file-
221 236 # systems with a granularity of 1 sec). This commonly happens
222 237 # for at least a couple of files on 'update'.
223 238 # The user could change the file without changing its size
224 239 # within the same second. Invalidate the file's mtime in
225 240 # dirstate, forcing future 'status' calls to compare the
226 241 # contents of the file if the size is the same. This prevents
227 242 # mistakenly treating such files as clean.
228 243 e = dirstatetuple(e[0], e[1], e[2], -1)
229 244 dmap[f] = e
230 245
231 246 if f in copymap:
232 247 f = b"%s\0%s" % (f, copymap[f])
233 248 e = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
234 249 write(e)
235 250 write(f)
236 251 return cs.getvalue()
@@ -1,2996 +1,3019 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import contextlib
18 18 import errno
19 19 import io
20 20 import os
21 21 import struct
22 22 import zlib
23 23
24 24 # import stuff from node for others to import from revlog
25 25 from .node import (
26 26 bin,
27 27 hex,
28 28 nullhex,
29 29 nullid,
30 30 nullrev,
31 31 short,
32 32 wdirfilenodeids,
33 33 wdirhex,
34 34 wdirid,
35 35 wdirrev,
36 36 )
37 37 from .i18n import _
38 38 from .pycompat import getattr
39 39 from .revlogutils.constants import (
40 40 FLAG_GENERALDELTA,
41 41 FLAG_INLINE_DATA,
42 42 REVLOGV0,
43 43 REVLOGV1,
44 44 REVLOGV1_FLAGS,
45 45 REVLOGV2,
46 46 REVLOGV2_FLAGS,
47 47 REVLOG_DEFAULT_FLAGS,
48 48 REVLOG_DEFAULT_FORMAT,
49 49 REVLOG_DEFAULT_VERSION,
50 50 )
51 51 from .revlogutils.flagutil import (
52 52 REVIDX_DEFAULT_FLAGS,
53 53 REVIDX_ELLIPSIS,
54 54 REVIDX_EXTSTORED,
55 55 REVIDX_FLAGS_ORDER,
56 56 REVIDX_ISCENSORED,
57 57 REVIDX_RAWTEXT_CHANGING_FLAGS,
58 58 REVIDX_SIDEDATA,
59 59 )
60 60 from .thirdparty import attr
61 61 from . import (
62 62 ancestor,
63 63 dagop,
64 64 error,
65 65 mdiff,
66 66 policy,
67 67 pycompat,
68 68 templatefilters,
69 69 util,
70 70 )
71 71 from .interfaces import (
72 72 repository,
73 73 util as interfaceutil,
74 74 )
75 75 from .revlogutils import (
76 76 deltas as deltautil,
77 77 flagutil,
78 78 nodemap as nodemaputil,
79 79 sidedata as sidedatautil,
80 80 )
81 81 from .utils import (
82 82 storageutil,
83 83 stringutil,
84 84 )
85 85
86 86 # blanked usage of all the names to prevent pyflakes constraints
87 87 # We need these names available in the module for extensions.
88 88 REVLOGV0
89 89 REVLOGV1
90 90 REVLOGV2
91 91 FLAG_INLINE_DATA
92 92 FLAG_GENERALDELTA
93 93 REVLOG_DEFAULT_FLAGS
94 94 REVLOG_DEFAULT_FORMAT
95 95 REVLOG_DEFAULT_VERSION
96 96 REVLOGV1_FLAGS
97 97 REVLOGV2_FLAGS
98 98 REVIDX_ISCENSORED
99 99 REVIDX_ELLIPSIS
100 100 REVIDX_SIDEDATA
101 101 REVIDX_EXTSTORED
102 102 REVIDX_DEFAULT_FLAGS
103 103 REVIDX_FLAGS_ORDER
104 104 REVIDX_RAWTEXT_CHANGING_FLAGS
105 105
106 106 parsers = policy.importmod('parsers')
107 107 rustancestor = policy.importrust('ancestor')
108 108 rustdagop = policy.importrust('dagop')
109 109 rustrevlog = policy.importrust('revlog')
110 110
111 111 # Aliased for performance.
112 112 _zlibdecompress = zlib.decompress
113 113
114 114 # max size of revlog with inline data
115 115 _maxinline = 131072
116 116 _chunksize = 1048576
117 117
118 118 # Flag processors for REVIDX_ELLIPSIS.
119 119 def ellipsisreadprocessor(rl, text):
120 120 return text, False, {}
121 121
122 122
123 123 def ellipsiswriteprocessor(rl, text, sidedata):
124 124 return text, False
125 125
126 126
127 127 def ellipsisrawprocessor(rl, text):
128 128 return False
129 129
130 130
131 131 ellipsisprocessor = (
132 132 ellipsisreadprocessor,
133 133 ellipsiswriteprocessor,
134 134 ellipsisrawprocessor,
135 135 )
136 136
137 137
138 138 def getoffset(q):
139 139 return int(q >> 16)
140 140
141 141
142 142 def gettype(q):
143 143 return int(q & 0xFFFF)
144 144
145 145
146 146 def offset_type(offset, type):
147 147 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
148 148 raise ValueError(b'unknown revlog index flags')
149 149 return int(int(offset) << 16 | type)
150 150
151 151
152 152 def _verify_revision(rl, skipflags, state, node):
153 153 """Verify the integrity of the given revlog ``node`` while providing a hook
154 154 point for extensions to influence the operation."""
155 155 if skipflags:
156 156 state[b'skipread'].add(node)
157 157 else:
158 158 # Side-effect: read content and verify hash.
159 159 rl.revision(node)
160 160
161 161
162 162 @attr.s(slots=True, frozen=True)
163 163 class _revisioninfo(object):
164 164 """Information about a revision that allows building its fulltext
165 165 node: expected hash of the revision
166 166 p1, p2: parent revs of the revision
167 167 btext: built text cache consisting of a one-element list
168 168 cachedelta: (baserev, uncompressed_delta) or None
169 169 flags: flags associated to the revision storage
170 170
171 171 One of btext[0] or cachedelta must be set.
172 172 """
173 173
174 174 node = attr.ib()
175 175 p1 = attr.ib()
176 176 p2 = attr.ib()
177 177 btext = attr.ib()
178 178 textlen = attr.ib()
179 179 cachedelta = attr.ib()
180 180 flags = attr.ib()
181 181
182 182
183 183 @interfaceutil.implementer(repository.irevisiondelta)
184 184 @attr.s(slots=True)
185 185 class revlogrevisiondelta(object):
186 186 node = attr.ib()
187 187 p1node = attr.ib()
188 188 p2node = attr.ib()
189 189 basenode = attr.ib()
190 190 flags = attr.ib()
191 191 baserevisionsize = attr.ib()
192 192 revision = attr.ib()
193 193 delta = attr.ib()
194 194 linknode = attr.ib(default=None)
195 195
196 196
197 197 @interfaceutil.implementer(repository.iverifyproblem)
198 198 @attr.s(frozen=True)
199 199 class revlogproblem(object):
200 200 warning = attr.ib(default=None)
201 201 error = attr.ib(default=None)
202 202 node = attr.ib(default=None)
203 203
204 204
205 205 # index v0:
206 206 # 4 bytes: offset
207 207 # 4 bytes: compressed length
208 208 # 4 bytes: base rev
209 209 # 4 bytes: link rev
210 210 # 20 bytes: parent 1 nodeid
211 211 # 20 bytes: parent 2 nodeid
212 212 # 20 bytes: nodeid
213 213 indexformatv0 = struct.Struct(b">4l20s20s20s")
214 214 indexformatv0_pack = indexformatv0.pack
215 215 indexformatv0_unpack = indexformatv0.unpack
216 216
217 217
218 218 class revlogoldindex(list):
219 219 @property
220 220 def nodemap(self):
221 221 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
222 222 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
223 223 return self._nodemap
224 224
225 225 @util.propertycache
226 226 def _nodemap(self):
227 227 nodemap = nodemaputil.NodeMap({nullid: nullrev})
228 228 for r in range(0, len(self)):
229 229 n = self[r][7]
230 230 nodemap[n] = r
231 231 return nodemap
232 232
233 233 def has_node(self, node):
234 234 """return True if the node exist in the index"""
235 235 return node in self._nodemap
236 236
237 237 def rev(self, node):
238 238 """return a revision for a node
239 239
240 240 If the node is unknown, raise a RevlogError"""
241 241 return self._nodemap[node]
242 242
243 243 def get_rev(self, node):
244 244 """return a revision for a node
245 245
246 246 If the node is unknown, return None"""
247 247 return self._nodemap.get(node)
248 248
249 249 def append(self, tup):
250 250 self._nodemap[tup[7]] = len(self)
251 251 super(revlogoldindex, self).append(tup)
252 252
253 253 def __delitem__(self, i):
254 254 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
255 255 raise ValueError(b"deleting slices only supports a:-1 with step 1")
256 256 for r in pycompat.xrange(i.start, len(self)):
257 257 del self._nodemap[self[r][7]]
258 258 super(revlogoldindex, self).__delitem__(i)
259 259
260 260 def clearcaches(self):
261 261 self.__dict__.pop('_nodemap', None)
262 262
263 263 def __getitem__(self, i):
264 264 if i == -1:
265 265 return (0, 0, 0, -1, -1, -1, -1, nullid)
266 266 return list.__getitem__(self, i)
267 267
268 268
269 269 class revlogoldio(object):
270 270 def __init__(self):
271 271 self.size = indexformatv0.size
272 272
273 273 def parseindex(self, data, inline):
274 274 s = self.size
275 275 index = []
276 276 nodemap = nodemaputil.NodeMap({nullid: nullrev})
277 277 n = off = 0
278 278 l = len(data)
279 279 while off + s <= l:
280 280 cur = data[off : off + s]
281 281 off += s
282 282 e = indexformatv0_unpack(cur)
283 283 # transform to revlogv1 format
284 284 e2 = (
285 285 offset_type(e[0], 0),
286 286 e[1],
287 287 -1,
288 288 e[2],
289 289 e[3],
290 290 nodemap.get(e[4], nullrev),
291 291 nodemap.get(e[5], nullrev),
292 292 e[6],
293 293 )
294 294 index.append(e2)
295 295 nodemap[e[6]] = n
296 296 n += 1
297 297
298 298 index = revlogoldindex(index)
299 299 return index, None
300 300
301 301 def packentry(self, entry, node, version, rev):
302 302 if gettype(entry[0]):
303 303 raise error.RevlogError(
304 304 _(b'index entry flags need revlog version 1')
305 305 )
306 306 e2 = (
307 307 getoffset(entry[0]),
308 308 entry[1],
309 309 entry[3],
310 310 entry[4],
311 311 node(entry[5]),
312 312 node(entry[6]),
313 313 entry[7],
314 314 )
315 315 return indexformatv0_pack(*e2)
316 316
317 317
318 318 # index ng:
319 319 # 6 bytes: offset
320 320 # 2 bytes: flags
321 321 # 4 bytes: compressed length
322 322 # 4 bytes: uncompressed length
323 323 # 4 bytes: base rev
324 324 # 4 bytes: link rev
325 325 # 4 bytes: parent 1 rev
326 326 # 4 bytes: parent 2 rev
327 327 # 32 bytes: nodeid
328 328 indexformatng = struct.Struct(b">Qiiiiii20s12x")
329 329 indexformatng_pack = indexformatng.pack
330 330 versionformat = struct.Struct(b">I")
331 331 versionformat_pack = versionformat.pack
332 332 versionformat_unpack = versionformat.unpack
333 333
334 334 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
335 335 # signed integer)
336 336 _maxentrysize = 0x7FFFFFFF
337 337
338 338
339 339 class revlogio(object):
340 340 def __init__(self):
341 341 self.size = indexformatng.size
342 342
343 343 def parseindex(self, data, inline):
344 344 # call the C implementation to parse the index data
345 345 index, cache = parsers.parse_index2(data, inline)
346 346 return index, cache
347 347
348 348 def packentry(self, entry, node, version, rev):
349 349 p = indexformatng_pack(*entry)
350 350 if rev == 0:
351 351 p = versionformat_pack(version) + p[4:]
352 352 return p
353 353
354 354
355 NodemapRevlogIO = None
356
357 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
358
359 class NodemapRevlogIO(revlogio):
360 """A debug oriented IO class that return a PersistentNodeMapIndexObject
361
362 The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
363 """
364
365 def parseindex(self, data, inline):
366 index, cache = parsers.parse_index_devel_nodemap(data, inline)
367 return index, cache
368
369
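# Editor's note: this IO class is only reachable for debugging. _loadindex()
# below picks it instead of the default revlogio when the revlog has a
# nodemap file and the opener options carry the b'devel-force-nodemap' flag,
# so index parsing then goes through parse_index_devel_nodemap() and yields a
# PersistentNodeMapIndexObject.
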
355 370 class rustrevlogio(revlogio):
356 371 def parseindex(self, data, inline):
357 372 index, cache = super(rustrevlogio, self).parseindex(data, inline)
358 373 return rustrevlog.MixedIndex(index), cache
359 374
360 375
361 376 class revlog(object):
362 377 """
363 378 the underlying revision storage object
364 379
365 380 A revlog consists of two parts, an index and the revision data.
366 381
367 382 The index is a file with a fixed record size containing
368 383 information on each revision, including its nodeid (hash), the
369 384 nodeids of its parents, the position and offset of its data within
370 385 the data file, and the revision it's based on. Finally, each entry
371 386 contains a linkrev entry that can serve as a pointer to external
372 387 data.
373 388
374 389 The revision data itself is a linear collection of data chunks.
375 390 Each chunk represents a revision and is usually represented as a
376 391 delta against the previous chunk. To bound lookup time, runs of
377 392 deltas are limited to about 2 times the length of the original
378 393 version data. This makes retrieval of a version proportional to
379 394 its size, or O(1) relative to the number of revisions.
380 395
381 396 Both pieces of the revlog are written to in an append-only
382 397 fashion, which means we never need to rewrite a file to insert or
383 398 remove data, and can use some simple techniques to avoid the need
384 399 for locking while reading.
385 400
386 401 If checkambig, indexfile is opened with checkambig=True at
387 402 writing, to avoid file stat ambiguity.
388 403
389 404 If mmaplargeindex is True, and an mmapindexthreshold is set, the
390 405 index will be mmapped rather than read if it is larger than the
391 406 configured threshold.
392 407
393 408 If censorable is True, the revlog can have censored revisions.
394 409
395 410 If `upperboundcomp` is not None, this is the expected maximal gain from
396 411 compression for the data content.
397 412 """
398 413
399 414 _flagserrorclass = error.RevlogError
400 415
401 416 def __init__(
402 417 self,
403 418 opener,
404 419 indexfile,
405 420 datafile=None,
406 421 checkambig=False,
407 422 mmaplargeindex=False,
408 423 censorable=False,
409 424 upperboundcomp=None,
410 425 persistentnodemap=False,
411 426 ):
412 427 """
413 428 create a revlog object
414 429
415 430 opener is a function that abstracts the file opening operation
416 431 and can be used to implement COW semantics or the like.
417 432
418 433 """
419 434 self.upperboundcomp = upperboundcomp
420 435 self.indexfile = indexfile
421 436 self.datafile = datafile or (indexfile[:-2] + b".d")
422 437 self.nodemap_file = None
423 438 if persistentnodemap:
424 439 self.nodemap_file = indexfile[:-2] + b".n"
425 440
426 441 self.opener = opener
427 442 # When True, indexfile is opened with checkambig=True at writing, to
428 443 # avoid file stat ambiguity.
429 444 self._checkambig = checkambig
430 445 self._mmaplargeindex = mmaplargeindex
431 446 self._censorable = censorable
432 447 # 3-tuple of (node, rev, text) for a raw revision.
433 448 self._revisioncache = None
434 449 # Maps rev to chain base rev.
435 450 self._chainbasecache = util.lrucachedict(100)
436 451 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
437 452 self._chunkcache = (0, b'')
438 453 # How much data to read and cache into the raw revlog data cache.
439 454 self._chunkcachesize = 65536
440 455 self._maxchainlen = None
441 456 self._deltabothparents = True
442 457 self.index = None
443 458 # Mapping of partial identifiers to full nodes.
444 459 self._pcache = {}
445 460 # Mapping of revision integer to full node.
446 461 self._compengine = b'zlib'
447 462 self._compengineopts = {}
448 463 self._maxdeltachainspan = -1
449 464 self._withsparseread = False
450 465 self._sparserevlog = False
451 466 self._srdensitythreshold = 0.50
452 467 self._srmingapsize = 262144
453 468
454 469 # Make copy of flag processors so each revlog instance can support
455 470 # custom flags.
456 471 self._flagprocessors = dict(flagutil.flagprocessors)
457 472
458 473 # 2-tuple of file handles being used for active writing.
459 474 self._writinghandles = None
460 475
461 476 self._loadindex()
462 477
463 478 def _loadindex(self):
464 479 mmapindexthreshold = None
465 480 opts = self.opener.options
466 481
467 482 if b'revlogv2' in opts:
468 483 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
469 484 elif b'revlogv1' in opts:
470 485 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
471 486 if b'generaldelta' in opts:
472 487 newversionflags |= FLAG_GENERALDELTA
473 488 elif b'revlogv0' in self.opener.options:
474 489 newversionflags = REVLOGV0
475 490 else:
476 491 newversionflags = REVLOG_DEFAULT_VERSION
477 492
478 493 if b'chunkcachesize' in opts:
479 494 self._chunkcachesize = opts[b'chunkcachesize']
480 495 if b'maxchainlen' in opts:
481 496 self._maxchainlen = opts[b'maxchainlen']
482 497 if b'deltabothparents' in opts:
483 498 self._deltabothparents = opts[b'deltabothparents']
484 499 self._lazydelta = bool(opts.get(b'lazydelta', True))
485 500 self._lazydeltabase = False
486 501 if self._lazydelta:
487 502 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
488 503 if b'compengine' in opts:
489 504 self._compengine = opts[b'compengine']
490 505 if b'zlib.level' in opts:
491 506 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
492 507 if b'zstd.level' in opts:
493 508 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
494 509 if b'maxdeltachainspan' in opts:
495 510 self._maxdeltachainspan = opts[b'maxdeltachainspan']
496 511 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
497 512 mmapindexthreshold = opts[b'mmapindexthreshold']
498 513 self.hassidedata = bool(opts.get(b'side-data', False))
499 514 if self.hassidedata:
500 515 self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
501 516 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
502 517 withsparseread = bool(opts.get(b'with-sparse-read', False))
503 518 # sparse-revlog forces sparse-read
504 519 self._withsparseread = self._sparserevlog or withsparseread
505 520 if b'sparse-read-density-threshold' in opts:
506 521 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
507 522 if b'sparse-read-min-gap-size' in opts:
508 523 self._srmingapsize = opts[b'sparse-read-min-gap-size']
509 524 if opts.get(b'enableellipsis'):
510 525 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
511 526
512 527 # revlog v0 doesn't have flag processors
513 528 for flag, processor in pycompat.iteritems(
514 529 opts.get(b'flagprocessors', {})
515 530 ):
516 531 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
517 532
518 533 if self._chunkcachesize <= 0:
519 534 raise error.RevlogError(
520 535 _(b'revlog chunk cache size %r is not greater than 0')
521 536 % self._chunkcachesize
522 537 )
523 538 elif self._chunkcachesize & (self._chunkcachesize - 1):
524 539 raise error.RevlogError(
525 540 _(b'revlog chunk cache size %r is not a power of 2')
526 541 % self._chunkcachesize
527 542 )
528 543
529 544 indexdata = b''
530 545 self._initempty = True
531 546 try:
532 547 with self._indexfp() as f:
533 548 if (
534 549 mmapindexthreshold is not None
535 550 and self.opener.fstat(f).st_size >= mmapindexthreshold
536 551 ):
537 552 # TODO: should .close() to release resources without
538 553 # relying on Python GC
539 554 indexdata = util.buffer(util.mmapread(f))
540 555 else:
541 556 indexdata = f.read()
542 557 if len(indexdata) > 0:
543 558 versionflags = versionformat_unpack(indexdata[:4])[0]
544 559 self._initempty = False
545 560 else:
546 561 versionflags = newversionflags
547 562 except IOError as inst:
548 563 if inst.errno != errno.ENOENT:
549 564 raise
550 565
551 566 versionflags = newversionflags
552 567
553 568 self.version = versionflags
554 569
555 570 flags = versionflags & ~0xFFFF
556 571 fmt = versionflags & 0xFFFF
557 572
558 573 if fmt == REVLOGV0:
559 574 if flags:
560 575 raise error.RevlogError(
561 576 _(b'unknown flags (%#04x) in version %d revlog %s')
562 577 % (flags >> 16, fmt, self.indexfile)
563 578 )
564 579
565 580 self._inline = False
566 581 self._generaldelta = False
567 582
568 583 elif fmt == REVLOGV1:
569 584 if flags & ~REVLOGV1_FLAGS:
570 585 raise error.RevlogError(
571 586 _(b'unknown flags (%#04x) in version %d revlog %s')
572 587 % (flags >> 16, fmt, self.indexfile)
573 588 )
574 589
575 590 self._inline = versionflags & FLAG_INLINE_DATA
576 591 self._generaldelta = versionflags & FLAG_GENERALDELTA
577 592
578 593 elif fmt == REVLOGV2:
579 594 if flags & ~REVLOGV2_FLAGS:
580 595 raise error.RevlogError(
581 596 _(b'unknown flags (%#04x) in version %d revlog %s')
582 597 % (flags >> 16, fmt, self.indexfile)
583 598 )
584 599
585 600 self._inline = versionflags & FLAG_INLINE_DATA
586 601 # generaldelta implied by version 2 revlogs.
587 602 self._generaldelta = True
588 603
589 604 else:
590 605 raise error.RevlogError(
591 606 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
592 607 )
593 608 # sparse-revlog can't be on without general-delta (issue6056)
594 609 if not self._generaldelta:
595 610 self._sparserevlog = False
596 611
597 612 self._storedeltachains = True
598 613
614 devel_nodemap = (
615 self.nodemap_file
616 and opts.get(b'devel-force-nodemap', False)
617 and NodemapRevlogIO is not None
618 )
619
599 620 self._io = revlogio()
600 621 if self.version == REVLOGV0:
601 622 self._io = revlogoldio()
623 elif devel_nodemap:
624 self._io = NodemapRevlogIO()
602 625 elif rustrevlog is not None and self.opener.options.get(b'rust.index'):
603 626 self._io = rustrevlogio()
604 627 try:
605 628 d = self._io.parseindex(indexdata, self._inline)
606 629 except (ValueError, IndexError):
607 630 raise error.RevlogError(
608 631 _(b"index %s is corrupted") % self.indexfile
609 632 )
610 633 self.index, self._chunkcache = d
611 634 if not self._chunkcache:
612 635 self._chunkclear()
613 636 # revnum -> (chain-length, sum-delta-length)
614 637 self._chaininfocache = {}
615 638 # revlog header -> revlog compressor
616 639 self._decompressors = {}
617 640
618 641 @util.propertycache
619 642 def _compressor(self):
620 643 engine = util.compengines[self._compengine]
621 644 return engine.revlogcompressor(self._compengineopts)
622 645
623 646 def _indexfp(self, mode=b'r'):
624 647 """file object for the revlog's index file"""
625 648 args = {'mode': mode}
626 649 if mode != b'r':
627 650 args['checkambig'] = self._checkambig
628 651 if mode == b'w':
629 652 args['atomictemp'] = True
630 653 return self.opener(self.indexfile, **args)
631 654
632 655 def _datafp(self, mode=b'r'):
633 656 """file object for the revlog's data file"""
634 657 return self.opener(self.datafile, mode=mode)
635 658
636 659 @contextlib.contextmanager
637 660 def _datareadfp(self, existingfp=None):
638 661 """file object suitable to read data"""
639 662 # Use explicit file handle, if given.
640 663 if existingfp is not None:
641 664 yield existingfp
642 665
643 666 # Use a file handle being actively used for writes, if available.
644 667 # There is some danger to doing this because reads will seek the
645 668 # file. However, _writeentry() performs a SEEK_END before all writes,
646 669 # so we should be safe.
647 670 elif self._writinghandles:
648 671 if self._inline:
649 672 yield self._writinghandles[0]
650 673 else:
651 674 yield self._writinghandles[1]
652 675
653 676 # Otherwise open a new file handle.
654 677 else:
655 678 if self._inline:
656 679 func = self._indexfp
657 680 else:
658 681 func = self._datafp
659 682 with func() as fp:
660 683 yield fp
661 684
662 685 def tiprev(self):
663 686 return len(self.index) - 1
664 687
665 688 def tip(self):
666 689 return self.node(self.tiprev())
667 690
668 691 def __contains__(self, rev):
669 692 return 0 <= rev < len(self)
670 693
671 694 def __len__(self):
672 695 return len(self.index)
673 696
674 697 def __iter__(self):
675 698 return iter(pycompat.xrange(len(self)))
676 699
677 700 def revs(self, start=0, stop=None):
678 701 """iterate over all rev in this revlog (from start to stop)"""
679 702 return storageutil.iterrevs(len(self), start=start, stop=stop)
680 703
681 704 @property
682 705 def nodemap(self):
683 706 msg = (
684 707 b"revlog.nodemap is deprecated, "
685 708 b"use revlog.index.[has_node|rev|get_rev]"
686 709 )
687 710 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
688 711 return self.index.nodemap
689 712
690 713 @property
691 714 def _nodecache(self):
692 715 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
693 716 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
694 717 return self.index.nodemap
695 718
696 719 def hasnode(self, node):
697 720 try:
698 721 self.rev(node)
699 722 return True
700 723 except KeyError:
701 724 return False
702 725
703 726 def candelta(self, baserev, rev):
704 727 """whether two revisions (baserev, rev) can be delta-ed or not"""
705 728 # Disable delta if either rev requires a content-changing flag
706 729 # processor (ex. LFS). This is because such flag processor can alter
707 730 # the rawtext content that the delta will be based on, and two clients
708 731 # could have a same revlog node with different flags (i.e. different
709 732 # rawtext contents) and the delta could be incompatible.
710 733 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
711 734 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
712 735 ):
713 736 return False
714 737 return True
715 738
716 739 def clearcaches(self):
717 740 self._revisioncache = None
718 741 self._chainbasecache.clear()
719 742 self._chunkcache = (0, b'')
720 743 self._pcache = {}
721 744 self.index.clearcaches()
722 745
723 746 def rev(self, node):
724 747 try:
725 748 return self.index.rev(node)
726 749 except TypeError:
727 750 raise
728 751 except error.RevlogError:
729 752 # parsers.c radix tree lookup failed
730 753 if node == wdirid or node in wdirfilenodeids:
731 754 raise error.WdirUnsupported
732 755 raise error.LookupError(node, self.indexfile, _(b'no node'))
733 756
734 757 # Accessors for index entries.
735 758
736 759 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
737 760 # are flags.
738 761 def start(self, rev):
739 762 return int(self.index[rev][0] >> 16)
740 763
741 764 def flags(self, rev):
742 765 return self.index[rev][0] & 0xFFFF
743 766
744 767 def length(self, rev):
745 768 return self.index[rev][1]
746 769
747 770 def rawsize(self, rev):
748 771 """return the length of the uncompressed text for a given revision"""
749 772 l = self.index[rev][2]
750 773 if l >= 0:
751 774 return l
752 775
753 776 t = self.rawdata(rev)
754 777 return len(t)
755 778
756 779 def size(self, rev):
757 780 """length of non-raw text (processed by a "read" flag processor)"""
758 781 # fast path: if no "read" flag processor could change the content,
759 782 # size is rawsize. note: ELLIPSIS is known to not change the content.
760 783 flags = self.flags(rev)
761 784 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
762 785 return self.rawsize(rev)
763 786
764 787 return len(self.revision(rev, raw=False))
765 788
766 789 def chainbase(self, rev):
767 790 base = self._chainbasecache.get(rev)
768 791 if base is not None:
769 792 return base
770 793
771 794 index = self.index
772 795 iterrev = rev
773 796 base = index[iterrev][3]
774 797 while base != iterrev:
775 798 iterrev = base
776 799 base = index[iterrev][3]
777 800
778 801 self._chainbasecache[rev] = base
779 802 return base
780 803
781 804 def linkrev(self, rev):
782 805 return self.index[rev][4]
783 806
784 807 def parentrevs(self, rev):
785 808 try:
786 809 entry = self.index[rev]
787 810 except IndexError:
788 811 if rev == wdirrev:
789 812 raise error.WdirUnsupported
790 813 raise
791 814
792 815 return entry[5], entry[6]
793 816
794 817 # fast parentrevs(rev) where rev isn't filtered
795 818 _uncheckedparentrevs = parentrevs
796 819
797 820 def node(self, rev):
798 821 try:
799 822 return self.index[rev][7]
800 823 except IndexError:
801 824 if rev == wdirrev:
802 825 raise error.WdirUnsupported
803 826 raise
804 827
805 828 # Derived from index values.
806 829
807 830 def end(self, rev):
808 831 return self.start(rev) + self.length(rev)
809 832
810 833 def parents(self, node):
811 834 i = self.index
812 835 d = i[self.rev(node)]
813 836 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
814 837
815 838 def chainlen(self, rev):
816 839 return self._chaininfo(rev)[0]
817 840
818 841 def _chaininfo(self, rev):
819 842 chaininfocache = self._chaininfocache
820 843 if rev in chaininfocache:
821 844 return chaininfocache[rev]
822 845 index = self.index
823 846 generaldelta = self._generaldelta
824 847 iterrev = rev
825 848 e = index[iterrev]
826 849 clen = 0
827 850 compresseddeltalen = 0
828 851 while iterrev != e[3]:
829 852 clen += 1
830 853 compresseddeltalen += e[1]
831 854 if generaldelta:
832 855 iterrev = e[3]
833 856 else:
834 857 iterrev -= 1
835 858 if iterrev in chaininfocache:
836 859 t = chaininfocache[iterrev]
837 860 clen += t[0]
838 861 compresseddeltalen += t[1]
839 862 break
840 863 e = index[iterrev]
841 864 else:
842 865 # Add text length of base since decompressing that also takes
843 866 # work. For cache hits the length is already included.
844 867 compresseddeltalen += e[1]
845 868 r = (clen, compresseddeltalen)
846 869 chaininfocache[rev] = r
847 870 return r
848 871
849 872 def _deltachain(self, rev, stoprev=None):
850 873 """Obtain the delta chain for a revision.
851 874
852 875 ``stoprev`` specifies a revision to stop at. If not specified, we
853 876 stop at the base of the chain.
854 877
855 878 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
856 879 revs in ascending order and ``stopped`` is a bool indicating whether
857 880 ``stoprev`` was hit.
858 881 """
859 882 # Try C implementation.
860 883 try:
861 884 return self.index.deltachain(rev, stoprev, self._generaldelta)
862 885 except AttributeError:
863 886 pass
864 887
865 888 chain = []
866 889
867 890 # Alias to prevent attribute lookup in tight loop.
868 891 index = self.index
869 892 generaldelta = self._generaldelta
870 893
871 894 iterrev = rev
872 895 e = index[iterrev]
873 896 while iterrev != e[3] and iterrev != stoprev:
874 897 chain.append(iterrev)
875 898 if generaldelta:
876 899 iterrev = e[3]
877 900 else:
878 901 iterrev -= 1
879 902 e = index[iterrev]
880 903
881 904 if iterrev == stoprev:
882 905 stopped = True
883 906 else:
884 907 chain.append(iterrev)
885 908 stopped = False
886 909
887 910 chain.reverse()
888 911 return chain, stopped
889 912
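# Editor's note: a worked example with made-up revisions. In a generaldelta
# revlog where rev 5 deltas against rev 3, rev 3 against rev 1, and rev 1 is
# a full snapshot (its base is itself):
#     _deltachain(5)            -> ([1, 3, 5], False)
#     _deltachain(5, stoprev=3) -> ([5], True)    # chain cut before stoprev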
890 913 def ancestors(self, revs, stoprev=0, inclusive=False):
891 914 """Generate the ancestors of 'revs' in reverse revision order.
892 915 Does not generate revs lower than stoprev.
893 916
894 917 See the documentation for ancestor.lazyancestors for more details."""
895 918
896 919 # first, make sure start revisions aren't filtered
897 920 revs = list(revs)
898 921 checkrev = self.node
899 922 for r in revs:
900 923 checkrev(r)
901 924 # and we're sure their ancestors aren't filtered either

902 925
903 926 if rustancestor is not None:
904 927 lazyancestors = rustancestor.LazyAncestors
905 928 arg = self.index
906 929 elif util.safehasattr(parsers, b'rustlazyancestors'):
907 930 lazyancestors = ancestor.rustlazyancestors
908 931 arg = self.index
909 932 else:
910 933 lazyancestors = ancestor.lazyancestors
911 934 arg = self._uncheckedparentrevs
912 935 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
913 936
914 937 def descendants(self, revs):
915 938 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
916 939
917 940 def findcommonmissing(self, common=None, heads=None):
918 941 """Return a tuple of the ancestors of common and the ancestors of heads
919 942 that are not ancestors of common. In revset terminology, we return the
920 943 tuple:
921 944
922 945 ::common, (::heads) - (::common)
923 946
924 947 The list is sorted by revision number, meaning it is
925 948 topologically sorted.
926 949
927 950 'heads' and 'common' are both lists of node IDs. If heads is
928 951 not supplied, uses all of the revlog's heads. If common is not
929 952 supplied, uses nullid."""
930 953 if common is None:
931 954 common = [nullid]
932 955 if heads is None:
933 956 heads = self.heads()
934 957
935 958 common = [self.rev(n) for n in common]
936 959 heads = [self.rev(n) for n in heads]
937 960
938 961 # we want the ancestors, but inclusive
939 962 class lazyset(object):
940 963 def __init__(self, lazyvalues):
941 964 self.addedvalues = set()
942 965 self.lazyvalues = lazyvalues
943 966
944 967 def __contains__(self, value):
945 968 return value in self.addedvalues or value in self.lazyvalues
946 969
947 970 def __iter__(self):
948 971 added = self.addedvalues
949 972 for r in added:
950 973 yield r
951 974 for r in self.lazyvalues:
952 975 if r not in added:
953 976 yield r
954 977
955 978 def add(self, value):
956 979 self.addedvalues.add(value)
957 980
958 981 def update(self, values):
959 982 self.addedvalues.update(values)
960 983
961 984 has = lazyset(self.ancestors(common))
962 985 has.add(nullrev)
963 986 has.update(common)
964 987
965 988 # take all ancestors from heads that aren't in has
966 989 missing = set()
967 990 visit = collections.deque(r for r in heads if r not in has)
968 991 while visit:
969 992 r = visit.popleft()
970 993 if r in missing:
971 994 continue
972 995 else:
973 996 missing.add(r)
974 997 for p in self.parentrevs(r):
975 998 if p not in has:
976 999 visit.append(p)
977 1000 missing = list(missing)
978 1001 missing.sort()
979 1002 return has, [self.node(miss) for miss in missing]
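# Usage sketch (added note; `cl`, `c1`, `c2` and `h` are assumed names):
#
#   has, missing = cl.findcommonmissing(common=[c1, c2], heads=[h])
#   # `has` lazily answers "is this rev an ancestor of common?", while
#   # `missing` lists the nodes of (::h) - (::common) in increasing revision
#   # (hence topological) order.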
980 1003
981 1004 def incrementalmissingrevs(self, common=None):
982 1005 """Return an object that can be used to incrementally compute the
983 1006 revision numbers of the ancestors of arbitrary sets that are not
984 1007 ancestors of common. This is an ancestor.incrementalmissingancestors
985 1008 object.
986 1009
987 1010 'common' is a list of revision numbers. If common is not supplied, uses
988 1011 nullrev.
989 1012 """
990 1013 if common is None:
991 1014 common = [nullrev]
992 1015
993 1016 if rustancestor is not None:
994 1017 return rustancestor.MissingAncestors(self.index, common)
995 1018 return ancestor.incrementalmissingancestors(self.parentrevs, common)
996 1019
997 1020 def findmissingrevs(self, common=None, heads=None):
998 1021 """Return the revision numbers of the ancestors of heads that
999 1022 are not ancestors of common.
1000 1023
1001 1024 More specifically, return a list of revision numbers corresponding to
1002 1025 nodes N such that every N satisfies the following constraints:
1003 1026
1004 1027 1. N is an ancestor of some node in 'heads'
1005 1028 2. N is not an ancestor of any node in 'common'
1006 1029
1007 1030 The list is sorted by revision number, meaning it is
1008 1031 topologically sorted.
1009 1032
1010 1033 'heads' and 'common' are both lists of revision numbers. If heads is
1011 1034 not supplied, uses all of the revlog's heads. If common is not
1012 1035 supplied, uses nullid."""
1013 1036 if common is None:
1014 1037 common = [nullrev]
1015 1038 if heads is None:
1016 1039 heads = self.headrevs()
1017 1040
1018 1041 inc = self.incrementalmissingrevs(common=common)
1019 1042 return inc.missingancestors(heads)
1020 1043
1021 1044 def findmissing(self, common=None, heads=None):
1022 1045 """Return the ancestors of heads that are not ancestors of common.
1023 1046
1024 1047 More specifically, return a list of nodes N such that every N
1025 1048 satisfies the following constraints:
1026 1049
1027 1050 1. N is an ancestor of some node in 'heads'
1028 1051 2. N is not an ancestor of any node in 'common'
1029 1052
1030 1053 The list is sorted by revision number, meaning it is
1031 1054 topologically sorted.
1032 1055
1033 1056 'heads' and 'common' are both lists of node IDs. If heads is
1034 1057 not supplied, uses all of the revlog's heads. If common is not
1035 1058 supplied, uses nullid."""
1036 1059 if common is None:
1037 1060 common = [nullid]
1038 1061 if heads is None:
1039 1062 heads = self.heads()
1040 1063
1041 1064 common = [self.rev(n) for n in common]
1042 1065 heads = [self.rev(n) for n in heads]
1043 1066
1044 1067 inc = self.incrementalmissingrevs(common=common)
1045 1068 return [self.node(r) for r in inc.missingancestors(heads)]
1046 1069
1047 1070 def nodesbetween(self, roots=None, heads=None):
1048 1071 """Return a topological path from 'roots' to 'heads'.
1049 1072
1050 1073 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1051 1074 topologically sorted list of all nodes N that satisfy both of
1052 1075 these constraints:
1053 1076
1054 1077 1. N is a descendant of some node in 'roots'
1055 1078 2. N is an ancestor of some node in 'heads'
1056 1079
1057 1080 Every node is considered to be both a descendant and an ancestor
1058 1081 of itself, so every reachable node in 'roots' and 'heads' will be
1059 1082 included in 'nodes'.
1060 1083
1061 1084 'outroots' is the list of reachable nodes in 'roots', i.e., the
1062 1085 subset of 'roots' that is returned in 'nodes'. Likewise,
1063 1086 'outheads' is the subset of 'heads' that is also in 'nodes'.
1064 1087
1065 1088 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1066 1089 unspecified, uses nullid as the only root. If 'heads' is
1067 1090 unspecified, uses list of all of the revlog's heads."""
1068 1091 nonodes = ([], [], [])
1069 1092 if roots is not None:
1070 1093 roots = list(roots)
1071 1094 if not roots:
1072 1095 return nonodes
1073 1096 lowestrev = min([self.rev(n) for n in roots])
1074 1097 else:
1075 1098 roots = [nullid] # Everybody's a descendant of nullid
1076 1099 lowestrev = nullrev
1077 1100 if (lowestrev == nullrev) and (heads is None):
1078 1101 # We want _all_ the nodes!
1079 1102 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1080 1103 if heads is None:
1081 1104 # All nodes are ancestors, so the latest ancestor is the last
1082 1105 # node.
1083 1106 highestrev = len(self) - 1
1084 1107 # Set ancestors to None to signal that every node is an ancestor.
1085 1108 ancestors = None
1086 1109 # Set heads to an empty dictionary for later discovery of heads
1087 1110 heads = {}
1088 1111 else:
1089 1112 heads = list(heads)
1090 1113 if not heads:
1091 1114 return nonodes
1092 1115 ancestors = set()
1093 1116 # Turn heads into a dictionary so we can remove 'fake' heads.
1094 1117 # Also, later we will be using it to filter out the heads we can't
1095 1118 # find from roots.
1096 1119 heads = dict.fromkeys(heads, False)
1097 1120 # Start at the top and keep marking parents until we're done.
1098 1121 nodestotag = set(heads)
1099 1122 # Remember where the top was so we can use it as a limit later.
1100 1123 highestrev = max([self.rev(n) for n in nodestotag])
1101 1124 while nodestotag:
1102 1125 # grab a node to tag
1103 1126 n = nodestotag.pop()
1104 1127 # Never tag nullid
1105 1128 if n == nullid:
1106 1129 continue
1107 1130 # A node's revision number represents its place in a
1108 1131 # topologically sorted list of nodes.
1109 1132 r = self.rev(n)
1110 1133 if r >= lowestrev:
1111 1134 if n not in ancestors:
1112 1135 # If we are possibly a descendant of one of the roots
1113 1136 # and we haven't already been marked as an ancestor
1114 1137 ancestors.add(n) # Mark as ancestor
1115 1138 # Add non-nullid parents to list of nodes to tag.
1116 1139 nodestotag.update(
1117 1140 [p for p in self.parents(n) if p != nullid]
1118 1141 )
1119 1142 elif n in heads: # We've seen it before, is it a fake head?
1120 1143 # So it is, real heads should not be the ancestors of
1121 1144 # any other heads.
1122 1145 heads.pop(n)
1123 1146 if not ancestors:
1124 1147 return nonodes
1125 1148 # Now that we have our set of ancestors, we want to remove any
1126 1149 # roots that are not ancestors.
1127 1150
1128 1151 # If one of the roots was nullid, everything is included anyway.
1129 1152 if lowestrev > nullrev:
1130 1153 # But, since we weren't, let's recompute the lowest rev to not
1131 1154 # include roots that aren't ancestors.
1132 1155
1133 1156 # Filter out roots that aren't ancestors of heads
1134 1157 roots = [root for root in roots if root in ancestors]
1135 1158 # Recompute the lowest revision
1136 1159 if roots:
1137 1160 lowestrev = min([self.rev(root) for root in roots])
1138 1161 else:
1139 1162 # No more roots? Return empty list
1140 1163 return nonodes
1141 1164 else:
1142 1165 # We are descending from nullid, and don't need to care about
1143 1166 # any other roots.
1144 1167 lowestrev = nullrev
1145 1168 roots = [nullid]
1146 1169 # Transform our roots list into a set.
1147 1170 descendants = set(roots)
1148 1171 # Also, keep the original roots so we can filter out roots that aren't
1149 1172 # 'real' roots (i.e. are descended from other roots).
1150 1173 roots = descendants.copy()
1151 1174 # Our topologically sorted list of output nodes.
1152 1175 orderedout = []
1153 1176 # Don't start at nullid since we don't want nullid in our output list,
1154 1177 # and if nullid shows up in descendants, empty parents will look like
1155 1178 # they're descendants.
1156 1179 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1157 1180 n = self.node(r)
1158 1181 isdescendant = False
1159 1182 if lowestrev == nullrev: # Everybody is a descendant of nullid
1160 1183 isdescendant = True
1161 1184 elif n in descendants:
1162 1185 # n is already a descendant
1163 1186 isdescendant = True
1164 1187 # This check only needs to be done here because all the roots
1165 1188 # will start being marked as descendants before the loop.
1166 1189 if n in roots:
1167 1190 # If n was a root, check if it's a 'real' root.
1168 1191 p = tuple(self.parents(n))
1169 1192 # If any of its parents are descendants, it's not a root.
1170 1193 if (p[0] in descendants) or (p[1] in descendants):
1171 1194 roots.remove(n)
1172 1195 else:
1173 1196 p = tuple(self.parents(n))
1174 1197 # A node is a descendant if either of its parents are
1175 1198 # descendants. (We seeded the descendants set with the roots
1176 1199 # up there, remember?)
1177 1200 if (p[0] in descendants) or (p[1] in descendants):
1178 1201 descendants.add(n)
1179 1202 isdescendant = True
1180 1203 if isdescendant and ((ancestors is None) or (n in ancestors)):
1181 1204 # Only include nodes that are both descendants and ancestors.
1182 1205 orderedout.append(n)
1183 1206 if (ancestors is not None) and (n in heads):
1184 1207 # We're trying to figure out which heads are reachable
1185 1208 # from roots.
1186 1209 # Mark this head as having been reached
1187 1210 heads[n] = True
1188 1211 elif ancestors is None:
1189 1212 # Otherwise, we're trying to discover the heads.
1190 1213 # Assume this is a head because if it isn't, the next step
1191 1214 # will eventually remove it.
1192 1215 heads[n] = True
1193 1216 # But, obviously its parents aren't.
1194 1217 for p in self.parents(n):
1195 1218 heads.pop(p, None)
1196 1219 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1197 1220 roots = list(roots)
1198 1221 assert orderedout
1199 1222 assert roots
1200 1223 assert heads
1201 1224 return (orderedout, roots, heads)
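# Usage sketch (added note; `rl`, `a` and `b` are assumed names):
#
#   nodes, outroots, outheads = rl.nodesbetween(roots=[a], heads=[b])
#   # `nodes` holds every node that is both a descendant of `a` and an
#   # ancestor of `b` (both inclusive), topologically sorted; `outroots` and
#   # `outheads` are the subsets of the arguments that are actually reachable.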
1202 1225
1203 1226 def headrevs(self, revs=None):
1204 1227 if revs is None:
1205 1228 try:
1206 1229 return self.index.headrevs()
1207 1230 except AttributeError:
1208 1231 return self._headrevs()
1209 1232 if rustdagop is not None:
1210 1233 return rustdagop.headrevs(self.index, revs)
1211 1234 return dagop.headrevs(revs, self._uncheckedparentrevs)
1212 1235
1213 1236 def computephases(self, roots):
1214 1237 return self.index.computephasesmapsets(roots)
1215 1238
1216 1239 def _headrevs(self):
1217 1240 count = len(self)
1218 1241 if not count:
1219 1242 return [nullrev]
1220 1243 # we won't iterate over filtered revs, so nobody is a head at start
1221 1244 ishead = [0] * (count + 1)
1222 1245 index = self.index
1223 1246 for r in self:
1224 1247 ishead[r] = 1 # I may be a head
1225 1248 e = index[r]
1226 1249 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1227 1250 return [r for r, val in enumerate(ishead) if val]
1228 1251
1229 1252 def heads(self, start=None, stop=None):
1230 1253 """return the list of all nodes that have no children
1231 1254
1232 1255 if start is specified, only heads that are descendants of
1233 1256 start will be returned
1234 1257 if stop is specified, it will consider all the revs from stop
1235 1258 as if they had no children
1236 1259 """
1237 1260 if start is None and stop is None:
1238 1261 if not len(self):
1239 1262 return [nullid]
1240 1263 return [self.node(r) for r in self.headrevs()]
1241 1264
1242 1265 if start is None:
1243 1266 start = nullrev
1244 1267 else:
1245 1268 start = self.rev(start)
1246 1269
1247 1270 stoprevs = set(self.rev(n) for n in stop or [])
1248 1271
1249 1272 revs = dagop.headrevssubset(
1250 1273 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1251 1274 )
1252 1275
1253 1276 return [self.node(rev) for rev in revs]
1254 1277
1255 1278 def children(self, node):
1256 1279 """find the children of a given node"""
1257 1280 c = []
1258 1281 p = self.rev(node)
1259 1282 for r in self.revs(start=p + 1):
1260 1283 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1261 1284 if prevs:
1262 1285 for pr in prevs:
1263 1286 if pr == p:
1264 1287 c.append(self.node(r))
1265 1288 elif p == nullrev:
1266 1289 c.append(self.node(r))
1267 1290 return c
1268 1291
1269 1292 def commonancestorsheads(self, a, b):
1270 1293 """calculate all the heads of the common ancestors of nodes a and b"""
1271 1294 a, b = self.rev(a), self.rev(b)
1272 1295 ancs = self._commonancestorsheads(a, b)
1273 1296 return pycompat.maplist(self.node, ancs)
1274 1297
1275 1298 def _commonancestorsheads(self, *revs):
1276 1299 """calculate all the heads of the common ancestors of revs"""
1277 1300 try:
1278 1301 ancs = self.index.commonancestorsheads(*revs)
1279 1302 except (AttributeError, OverflowError): # C implementation failed
1280 1303 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1281 1304 return ancs
1282 1305
1283 1306 def isancestor(self, a, b):
1284 1307 """return True if node a is an ancestor of node b
1285 1308
1286 1309 A revision is considered an ancestor of itself."""
1287 1310 a, b = self.rev(a), self.rev(b)
1288 1311 return self.isancestorrev(a, b)
1289 1312
1290 1313 def isancestorrev(self, a, b):
1291 1314 """return True if revision a is an ancestor of revision b
1292 1315
1293 1316 A revision is considered an ancestor of itself.
1294 1317
1295 1318 The implementation of this is trivial but the use of
1296 1319 reachableroots is not."""
1297 1320 if a == nullrev:
1298 1321 return True
1299 1322 elif a == b:
1300 1323 return True
1301 1324 elif a > b:
1302 1325 return False
1303 1326 return bool(self.reachableroots(a, [b], [a], includepath=False))
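# Added note (sketch): the reachability query above is non-empty exactly when
# `a` lies in `::b`. For example, if rev c was committed on top of rev p,
# rl.isancestorrev(p, c) and rl.isancestorrev(c, c) are both True, while
# rl.isancestorrev(c, p) is False because c > p.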
1304 1327
1305 1328 def reachableroots(self, minroot, heads, roots, includepath=False):
1306 1329 """return (heads(::(<roots> and <roots>::<heads>)))
1307 1330
1308 1331 If includepath is True, return (<roots>::<heads>)."""
1309 1332 try:
1310 1333 return self.index.reachableroots2(
1311 1334 minroot, heads, roots, includepath
1312 1335 )
1313 1336 except AttributeError:
1314 1337 return dagop._reachablerootspure(
1315 1338 self.parentrevs, minroot, roots, heads, includepath
1316 1339 )
1317 1340
1318 1341 def ancestor(self, a, b):
1319 1342 """calculate the "best" common ancestor of nodes a and b"""
1320 1343
1321 1344 a, b = self.rev(a), self.rev(b)
1322 1345 try:
1323 1346 ancs = self.index.ancestors(a, b)
1324 1347 except (AttributeError, OverflowError):
1325 1348 ancs = ancestor.ancestors(self.parentrevs, a, b)
1326 1349 if ancs:
1327 1350 # choose a consistent winner when there's a tie
1328 1351 return min(map(self.node, ancs))
1329 1352 return nullid
1330 1353
1331 1354 def _match(self, id):
1332 1355 if isinstance(id, int):
1333 1356 # rev
1334 1357 return self.node(id)
1335 1358 if len(id) == 20:
1336 1359 # possibly a binary node
1337 1360 # odds of a binary node being all hex in ASCII are 1 in 10**25
1338 1361 try:
1339 1362 node = id
1340 1363 self.rev(node) # quick search the index
1341 1364 return node
1342 1365 except error.LookupError:
1343 1366 pass # may be partial hex id
1344 1367 try:
1345 1368 # str(rev)
1346 1369 rev = int(id)
1347 1370 if b"%d" % rev != id:
1348 1371 raise ValueError
1349 1372 if rev < 0:
1350 1373 rev = len(self) + rev
1351 1374 if rev < 0 or rev >= len(self):
1352 1375 raise ValueError
1353 1376 return self.node(rev)
1354 1377 except (ValueError, OverflowError):
1355 1378 pass
1356 1379 if len(id) == 40:
1357 1380 try:
1358 1381 # a full hex nodeid?
1359 1382 node = bin(id)
1360 1383 self.rev(node)
1361 1384 return node
1362 1385 except (TypeError, error.LookupError):
1363 1386 pass
1364 1387
1365 1388 def _partialmatch(self, id):
1366 1389 # we don't care about wdirfilenodeids as they should always be full hashes
1367 1390 maybewdir = wdirhex.startswith(id)
1368 1391 try:
1369 1392 partial = self.index.partialmatch(id)
1370 1393 if partial and self.hasnode(partial):
1371 1394 if maybewdir:
1372 1395 # single 'ff...' match in radix tree, ambiguous with wdir
1373 1396 raise error.RevlogError
1374 1397 return partial
1375 1398 if maybewdir:
1376 1399 # no 'ff...' match in radix tree, wdir identified
1377 1400 raise error.WdirUnsupported
1378 1401 return None
1379 1402 except error.RevlogError:
1380 1403 # parsers.c radix tree lookup gave multiple matches
1381 1404 # fast path: for unfiltered changelog, radix tree is accurate
1382 1405 if not getattr(self, 'filteredrevs', None):
1383 1406 raise error.AmbiguousPrefixLookupError(
1384 1407 id, self.indexfile, _(b'ambiguous identifier')
1385 1408 )
1386 1409 # fall through to slow path that filters hidden revisions
1387 1410 except (AttributeError, ValueError):
1388 1411 # we are pure python, or key was too short to search radix tree
1389 1412 pass
1390 1413
1391 1414 if id in self._pcache:
1392 1415 return self._pcache[id]
1393 1416
1394 1417 if len(id) <= 40:
1395 1418 try:
1396 1419 # hex(node)[:...]
1397 1420 l = len(id) // 2 # grab an even number of digits
1398 1421 prefix = bin(id[: l * 2])
1399 1422 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1400 1423 nl = [
1401 1424 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1402 1425 ]
1403 1426 if nullhex.startswith(id):
1404 1427 nl.append(nullid)
1405 1428 if len(nl) > 0:
1406 1429 if len(nl) == 1 and not maybewdir:
1407 1430 self._pcache[id] = nl[0]
1408 1431 return nl[0]
1409 1432 raise error.AmbiguousPrefixLookupError(
1410 1433 id, self.indexfile, _(b'ambiguous identifier')
1411 1434 )
1412 1435 if maybewdir:
1413 1436 raise error.WdirUnsupported
1414 1437 return None
1415 1438 except TypeError:
1416 1439 pass
1417 1440
1418 1441 def lookup(self, id):
1419 1442 """locate a node based on:
1420 1443 - revision number or str(revision number)
1421 1444 - nodeid or subset of hex nodeid
1422 1445 """
1423 1446 n = self._match(id)
1424 1447 if n is not None:
1425 1448 return n
1426 1449 n = self._partialmatch(id)
1427 1450 if n:
1428 1451 return n
1429 1452
1430 1453 raise error.LookupError(id, self.indexfile, _(b'no match found'))
1431 1454
1432 1455 def shortest(self, node, minlength=1):
1433 1456 """Find the shortest unambiguous prefix that matches node."""
1434 1457
1435 1458 def isvalid(prefix):
1436 1459 try:
1437 1460 matchednode = self._partialmatch(prefix)
1438 1461 except error.AmbiguousPrefixLookupError:
1439 1462 return False
1440 1463 except error.WdirUnsupported:
1441 1464 # single 'ff...' match
1442 1465 return True
1443 1466 if matchednode is None:
1444 1467 raise error.LookupError(node, self.indexfile, _(b'no node'))
1445 1468 return True
1446 1469
1447 1470 def maybewdir(prefix):
1448 1471 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1449 1472
1450 1473 hexnode = hex(node)
1451 1474
1452 1475 def disambiguate(hexnode, minlength):
1453 1476 """Disambiguate against wdirid."""
1454 1477 for length in range(minlength, 41):
1455 1478 prefix = hexnode[:length]
1456 1479 if not maybewdir(prefix):
1457 1480 return prefix
1458 1481
1459 1482 if not getattr(self, 'filteredrevs', None):
1460 1483 try:
1461 1484 length = max(self.index.shortest(node), minlength)
1462 1485 return disambiguate(hexnode, length)
1463 1486 except error.RevlogError:
1464 1487 if node != wdirid:
1465 1488 raise error.LookupError(node, self.indexfile, _(b'no node'))
1466 1489 except AttributeError:
1467 1490 # Fall through to pure code
1468 1491 pass
1469 1492
1470 1493 if node == wdirid:
1471 1494 for length in range(minlength, 41):
1472 1495 prefix = hexnode[:length]
1473 1496 if isvalid(prefix):
1474 1497 return prefix
1475 1498
1476 1499 for length in range(minlength, 41):
1477 1500 prefix = hexnode[:length]
1478 1501 if isvalid(prefix):
1479 1502 return disambiguate(hexnode, length)
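# Usage sketch (added note; the prefix value shown is hypothetical):
#
#   rl.shortest(node)      # e.g. b'1f0dee6' - shortest unambiguous prefix
#   rl.shortest(node, 12)  # same lookup, but at least 12 hex digits long
#   # the result is disambiguated both against other stored nodes and against
#   # the virtual working-directory id (all-'f' prefixes are avoided).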
1480 1503
1481 1504 def cmp(self, node, text):
1482 1505 """compare text with a given file revision
1483 1506
1484 1507 returns True if text is different from what is stored.
1485 1508 """
1486 1509 p1, p2 = self.parents(node)
1487 1510 return storageutil.hashrevisionsha1(text, p1, p2) != node
1488 1511
1489 1512 def _cachesegment(self, offset, data):
1490 1513 """Add a segment to the revlog cache.
1491 1514
1492 1515 Accepts an absolute offset and the data that is at that location.
1493 1516 """
1494 1517 o, d = self._chunkcache
1495 1518 # try to add to existing cache
1496 1519 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1497 1520 self._chunkcache = o, d + data
1498 1521 else:
1499 1522 self._chunkcache = offset, data
1500 1523
1501 1524 def _readsegment(self, offset, length, df=None):
1502 1525 """Load a segment of raw data from the revlog.
1503 1526
1504 1527 Accepts an absolute offset, length to read, and an optional existing
1505 1528 file handle to read from.
1506 1529
1507 1530 If an existing file handle is passed, it will be seeked and the
1508 1531 original seek position will NOT be restored.
1509 1532
1510 1533 Returns a str or buffer of raw byte data.
1511 1534
1512 1535 Raises if the requested number of bytes could not be read.
1513 1536 """
1514 1537 # Cache data both forward and backward around the requested
1515 1538 # data, in a fixed size window. This helps speed up operations
1516 1539 # involving reading the revlog backwards.
1517 1540 cachesize = self._chunkcachesize
1518 1541 realoffset = offset & ~(cachesize - 1)
1519 1542 reallength = (
1520 1543 (offset + length + cachesize) & ~(cachesize - 1)
1521 1544 ) - realoffset
1522 1545 with self._datareadfp(df) as df:
1523 1546 df.seek(realoffset)
1524 1547 d = df.read(reallength)
1525 1548
1526 1549 self._cachesegment(realoffset, d)
1527 1550 if offset != realoffset or reallength != length:
1528 1551 startoffset = offset - realoffset
1529 1552 if len(d) - startoffset < length:
1530 1553 raise error.RevlogError(
1531 1554 _(
1532 1555 b'partial read of revlog %s; expected %d bytes from '
1533 1556 b'offset %d, got %d'
1534 1557 )
1535 1558 % (
1536 1559 self.indexfile if self._inline else self.datafile,
1537 1560 length,
1538 1561 realoffset,
1539 1562 len(d) - startoffset,
1540 1563 )
1541 1564 )
1542 1565
1543 1566 return util.buffer(d, startoffset, length)
1544 1567
1545 1568 if len(d) < length:
1546 1569 raise error.RevlogError(
1547 1570 _(
1548 1571 b'partial read of revlog %s; expected %d bytes from offset '
1549 1572 b'%d, got %d'
1550 1573 )
1551 1574 % (
1552 1575 self.indexfile if self._inline else self.datafile,
1553 1576 length,
1554 1577 offset,
1555 1578 len(d),
1556 1579 )
1557 1580 )
1558 1581
1559 1582 return d
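# Worked example of the cache-window arithmetic above (added note, assuming
# the default chunk cache size of 65536 bytes): a request for offset=70000,
# length=100 rounds down to realoffset = 70000 & ~65535 = 65536 and reads
# reallength = ((70000 + 100 + 65536) & ~65535) - 65536 = 65536 bytes, so the
# surrounding 64KiB window ends up cached for neighbouring requests.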
1560 1583
1561 1584 def _getsegment(self, offset, length, df=None):
1562 1585 """Obtain a segment of raw data from the revlog.
1563 1586
1564 1587 Accepts an absolute offset, length of bytes to obtain, and an
1565 1588 optional file handle to the already-opened revlog. If the file
1566 1589 handle is used, its original seek position will not be preserved.
1567 1590
1568 1591 Requests for data may be returned from a cache.
1569 1592
1570 1593 Returns a str or a buffer instance of raw byte data.
1571 1594 """
1572 1595 o, d = self._chunkcache
1573 1596 l = len(d)
1574 1597
1575 1598 # is it in the cache?
1576 1599 cachestart = offset - o
1577 1600 cacheend = cachestart + length
1578 1601 if cachestart >= 0 and cacheend <= l:
1579 1602 if cachestart == 0 and cacheend == l:
1580 1603 return d # avoid a copy
1581 1604 return util.buffer(d, cachestart, cacheend - cachestart)
1582 1605
1583 1606 return self._readsegment(offset, length, df=df)
1584 1607
1585 1608 def _getsegmentforrevs(self, startrev, endrev, df=None):
1586 1609 """Obtain a segment of raw data corresponding to a range of revisions.
1587 1610
1588 1611 Accepts the start and end revisions and an optional already-open
1589 1612 file handle to be used for reading. If the file handle is read, its
1590 1613 seek position will not be preserved.
1591 1614
1592 1615 Requests for data may be satisfied by a cache.
1593 1616
1594 1617 Returns a 2-tuple of (offset, data) for the requested range of
1595 1618 revisions. Offset is the integer offset from the beginning of the
1596 1619 revlog and data is a str or buffer of the raw byte data.
1597 1620
1598 1621 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1599 1622 to determine where each revision's data begins and ends.
1600 1623 """
1601 1624 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1602 1625 # (functions are expensive).
1603 1626 index = self.index
1604 1627 istart = index[startrev]
1605 1628 start = int(istart[0] >> 16)
1606 1629 if startrev == endrev:
1607 1630 end = start + istart[1]
1608 1631 else:
1609 1632 iend = index[endrev]
1610 1633 end = int(iend[0] >> 16) + iend[1]
1611 1634
1612 1635 if self._inline:
1613 1636 start += (startrev + 1) * self._io.size
1614 1637 end += (endrev + 1) * self._io.size
1615 1638 length = end - start
1616 1639
1617 1640 return start, self._getsegment(start, length, df=df)
1618 1641
1619 1642 def _chunk(self, rev, df=None):
1620 1643 """Obtain a single decompressed chunk for a revision.
1621 1644
1622 1645 Accepts an integer revision and an optional already-open file handle
1623 1646 to be used for reading. If used, the seek position of the file will not
1624 1647 be preserved.
1625 1648
1626 1649 Returns a str holding uncompressed data for the requested revision.
1627 1650 """
1628 1651 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1629 1652
1630 1653 def _chunks(self, revs, df=None, targetsize=None):
1631 1654 """Obtain decompressed chunks for the specified revisions.
1632 1655
1633 1656 Accepts an iterable of numeric revisions that are assumed to be in
1634 1657 ascending order. Also accepts an optional already-open file handle
1635 1658 to be used for reading. If used, the seek position of the file will
1636 1659 not be preserved.
1637 1660
1638 1661 This function is similar to calling ``self._chunk()`` multiple times,
1639 1662 but is faster.
1640 1663
1641 1664 Returns a list with decompressed data for each requested revision.
1642 1665 """
1643 1666 if not revs:
1644 1667 return []
1645 1668 start = self.start
1646 1669 length = self.length
1647 1670 inline = self._inline
1648 1671 iosize = self._io.size
1649 1672 buffer = util.buffer
1650 1673
1651 1674 l = []
1652 1675 ladd = l.append
1653 1676
1654 1677 if not self._withsparseread:
1655 1678 slicedchunks = (revs,)
1656 1679 else:
1657 1680 slicedchunks = deltautil.slicechunk(
1658 1681 self, revs, targetsize=targetsize
1659 1682 )
1660 1683
1661 1684 for revschunk in slicedchunks:
1662 1685 firstrev = revschunk[0]
1663 1686 # Skip trailing revisions with empty diff
1664 1687 for lastrev in revschunk[::-1]:
1665 1688 if length(lastrev) != 0:
1666 1689 break
1667 1690
1668 1691 try:
1669 1692 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1670 1693 except OverflowError:
1671 1694 # issue4215 - we can't cache a run of chunks greater than
1672 1695 # 2G on Windows
1673 1696 return [self._chunk(rev, df=df) for rev in revschunk]
1674 1697
1675 1698 decomp = self.decompress
1676 1699 for rev in revschunk:
1677 1700 chunkstart = start(rev)
1678 1701 if inline:
1679 1702 chunkstart += (rev + 1) * iosize
1680 1703 chunklength = length(rev)
1681 1704 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1682 1705
1683 1706 return l
1684 1707
1685 1708 def _chunkclear(self):
1686 1709 """Clear the raw chunk cache."""
1687 1710 self._chunkcache = (0, b'')
1688 1711
1689 1712 def deltaparent(self, rev):
1690 1713 """return deltaparent of the given revision"""
1691 1714 base = self.index[rev][3]
1692 1715 if base == rev:
1693 1716 return nullrev
1694 1717 elif self._generaldelta:
1695 1718 return base
1696 1719 else:
1697 1720 return rev - 1
1698 1721
1699 1722 def issnapshot(self, rev):
1700 1723 """tells whether rev is a snapshot
1701 1724 """
1702 1725 if not self._sparserevlog:
1703 1726 return self.deltaparent(rev) == nullrev
1704 1727 elif util.safehasattr(self.index, b'issnapshot'):
1705 1728 # directly assign the method to cache the testing and access
1706 1729 self.issnapshot = self.index.issnapshot
1707 1730 return self.issnapshot(rev)
1708 1731 if rev == nullrev:
1709 1732 return True
1710 1733 entry = self.index[rev]
1711 1734 base = entry[3]
1712 1735 if base == rev:
1713 1736 return True
1714 1737 if base == nullrev:
1715 1738 return True
1716 1739 p1 = entry[5]
1717 1740 p2 = entry[6]
1718 1741 if base == p1 or base == p2:
1719 1742 return False
1720 1743 return self.issnapshot(base)
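# Added note: on a sparse revlog a revision counts as a snapshot when it does
# not delta against one of its parents and its base is itself a snapshot (or
# nullrev, or the revision itself). For example, a rev whose base is nullrev
# is a full (level-0) snapshot, and a rev deltaing against that full snapshot
# instead of p1/p2 is an intermediate snapshot with snapshotdepth(rev) == 1.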
1721 1744
1722 1745 def snapshotdepth(self, rev):
1723 1746 """number of snapshot in the chain before this one"""
1724 1747 if not self.issnapshot(rev):
1725 1748 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1726 1749 return len(self._deltachain(rev)[0]) - 1
1727 1750
1728 1751 def revdiff(self, rev1, rev2):
1729 1752 """return or calculate a delta between two revisions
1730 1753
1731 1754 The delta calculated is in binary form and is intended to be written to
1732 1755 revlog data directly. So this function needs raw revision data.
1733 1756 """
1734 1757 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1735 1758 return bytes(self._chunk(rev2))
1736 1759
1737 1760 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1738 1761
1739 1762 def _processflags(self, text, flags, operation, raw=False):
1740 1763 """deprecated entry point to access flag processors"""
1741 1764 msg = b'_processflag(...) use the specialized variant'
1742 1765 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1743 1766 if raw:
1744 1767 return text, flagutil.processflagsraw(self, text, flags)
1745 1768 elif operation == b'read':
1746 1769 return flagutil.processflagsread(self, text, flags)
1747 1770 else: # write operation
1748 1771 return flagutil.processflagswrite(self, text, flags)
1749 1772
1750 1773 def revision(self, nodeorrev, _df=None, raw=False):
1751 1774 """return an uncompressed revision of a given node or revision
1752 1775 number.
1753 1776
1754 1777 _df - an existing file handle to read from. (internal-only)
1755 1778 raw - an optional argument specifying if the revision data is to be
1756 1779 treated as raw data when applying flag transforms. 'raw' should be set
1757 1780 to True when generating changegroups or in debug commands.
1758 1781 """
1759 1782 if raw:
1760 1783 msg = (
1761 1784 b'revlog.revision(..., raw=True) is deprecated, '
1762 1785 b'use revlog.rawdata(...)'
1763 1786 )
1764 1787 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1765 1788 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1766 1789
1767 1790 def sidedata(self, nodeorrev, _df=None):
1768 1791 """a map of extra data related to the changeset but not part of the hash
1769 1792
1770 1793 This function currently returns a dictionary. However, a more advanced
1771 1794 mapping object will likely be used in the future for more
1772 1795 efficient/lazy code.
1773 1796 """
1774 1797 return self._revisiondata(nodeorrev, _df)[1]
1775 1798
1776 1799 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1777 1800 # deal with <nodeorrev> argument type
1778 1801 if isinstance(nodeorrev, int):
1779 1802 rev = nodeorrev
1780 1803 node = self.node(rev)
1781 1804 else:
1782 1805 node = nodeorrev
1783 1806 rev = None
1784 1807
1785 1808 # fast path the special `nullid` rev
1786 1809 if node == nullid:
1787 1810 return b"", {}
1788 1811
1789 1812 # ``rawtext`` is the text as stored inside the revlog. Might be the
1790 1813 # revision or might need to be processed to retrieve the revision.
1791 1814 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1792 1815
1793 1816 if raw and validated:
1794 1817 # if we don't want to process the raw text and that raw
1795 1818 # text is cached, we can exit early.
1796 1819 return rawtext, {}
1797 1820 if rev is None:
1798 1821 rev = self.rev(node)
1799 1822 # the revlog's flag for this revision
1800 1823 # (usually alter its state or content)
1801 1824 flags = self.flags(rev)
1802 1825
1803 1826 if validated and flags == REVIDX_DEFAULT_FLAGS:
1804 1827 # no extra flags set, no flag processor runs, text = rawtext
1805 1828 return rawtext, {}
1806 1829
1807 1830 sidedata = {}
1808 1831 if raw:
1809 1832 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1810 1833 text = rawtext
1811 1834 else:
1812 1835 try:
1813 1836 r = flagutil.processflagsread(self, rawtext, flags)
1814 1837 except error.SidedataHashError as exc:
1815 1838 msg = _(b"integrity check failed on %s:%s sidedata key %d")
1816 1839 msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
1817 1840 raise error.RevlogError(msg)
1818 1841 text, validatehash, sidedata = r
1819 1842 if validatehash:
1820 1843 self.checkhash(text, node, rev=rev)
1821 1844 if not validated:
1822 1845 self._revisioncache = (node, rev, rawtext)
1823 1846
1824 1847 return text, sidedata
1825 1848
1826 1849 def _rawtext(self, node, rev, _df=None):
1827 1850 """return the possibly unvalidated rawtext for a revision
1828 1851
1829 1852 returns (rev, rawtext, validated)
1830 1853 """
1831 1854
1832 1855 # revision in the cache (could be useful to apply delta)
1833 1856 cachedrev = None
1834 1857 # An intermediate text to apply deltas to
1835 1858 basetext = None
1836 1859
1837 1860 # Check if we have the entry in cache
1838 1861 # The cache entry looks like (node, rev, rawtext)
1839 1862 if self._revisioncache:
1840 1863 if self._revisioncache[0] == node:
1841 1864 return (rev, self._revisioncache[2], True)
1842 1865 cachedrev = self._revisioncache[1]
1843 1866
1844 1867 if rev is None:
1845 1868 rev = self.rev(node)
1846 1869
1847 1870 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1848 1871 if stopped:
1849 1872 basetext = self._revisioncache[2]
1850 1873
1851 1874 # drop cache to save memory, the caller is expected to
1852 1875 # update self._revisioncache after validating the text
1853 1876 self._revisioncache = None
1854 1877
1855 1878 targetsize = None
1856 1879 rawsize = self.index[rev][2]
1857 1880 if 0 <= rawsize:
1858 1881 targetsize = 4 * rawsize
1859 1882
1860 1883 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1861 1884 if basetext is None:
1862 1885 basetext = bytes(bins[0])
1863 1886 bins = bins[1:]
1864 1887
1865 1888 rawtext = mdiff.patches(basetext, bins)
1866 1889 del basetext # let us have a chance to free memory early
1867 1890 return (rev, rawtext, False)
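# Added sketch of the reconstruction step above: for a chain [b, d1, d2]
# returned by _deltachain, _chunks yields the decompressed full text of b
# followed by the binary deltas for d1 and d2, and mdiff.patches(basetext,
# bins) folds those deltas over the base to rebuild the raw text. When the
# walk stopped at the cached revision, the cached raw text is used as
# basetext and every returned chunk is treated as a delta.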
1868 1891
1869 1892 def rawdata(self, nodeorrev, _df=None):
1870 1893 """return an uncompressed raw data of a given node or revision number.
1871 1894
1872 1895 _df - an existing file handle to read from. (internal-only)
1873 1896 """
1874 1897 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1875 1898
1876 1899 def hash(self, text, p1, p2):
1877 1900 """Compute a node hash.
1878 1901
1879 1902 Available as a function so that subclasses can replace the hash
1880 1903 as needed.
1881 1904 """
1882 1905 return storageutil.hashrevisionsha1(text, p1, p2)
1883 1906
1884 1907 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1885 1908 """Check node hash integrity.
1886 1909
1887 1910 Available as a function so that subclasses can extend hash mismatch
1888 1911 behaviors as needed.
1889 1912 """
1890 1913 try:
1891 1914 if p1 is None and p2 is None:
1892 1915 p1, p2 = self.parents(node)
1893 1916 if node != self.hash(text, p1, p2):
1894 1917 # Clear the revision cache on hash failure. The revision cache
1895 1918 # only stores the raw revision and clearing the cache does have
1896 1919 # the side-effect that we won't have a cache hit when the raw
1897 1920 # revision data is accessed. But this case should be rare and
1898 1921 # it is extra work to teach the cache about the hash
1899 1922 # verification state.
1900 1923 if self._revisioncache and self._revisioncache[0] == node:
1901 1924 self._revisioncache = None
1902 1925
1903 1926 revornode = rev
1904 1927 if revornode is None:
1905 1928 revornode = templatefilters.short(hex(node))
1906 1929 raise error.RevlogError(
1907 1930 _(b"integrity check failed on %s:%s")
1908 1931 % (self.indexfile, pycompat.bytestr(revornode))
1909 1932 )
1910 1933 except error.RevlogError:
1911 1934 if self._censorable and storageutil.iscensoredtext(text):
1912 1935 raise error.CensoredNodeError(self.indexfile, node, text)
1913 1936 raise
1914 1937
1915 1938 def _enforceinlinesize(self, tr, fp=None):
1916 1939 """Check if the revlog is too big for inline and convert if so.
1917 1940
1918 1941 This should be called after revisions are added to the revlog. If the
1919 1942 revlog has grown too large to be an inline revlog, it will convert it
1920 1943 to use multiple index and data files.
1921 1944 """
1922 1945 tiprev = len(self) - 1
1923 1946 if (
1924 1947 not self._inline
1925 1948 or (self.start(tiprev) + self.length(tiprev)) < _maxinline
1926 1949 ):
1927 1950 return
1928 1951
1929 1952 trinfo = tr.find(self.indexfile)
1930 1953 if trinfo is None:
1931 1954 raise error.RevlogError(
1932 1955 _(b"%s not found in the transaction") % self.indexfile
1933 1956 )
1934 1957
1935 1958 trindex = trinfo[2]
1936 1959 if trindex is not None:
1937 1960 dataoff = self.start(trindex)
1938 1961 else:
1939 1962 # revlog was stripped at start of transaction, use all leftover data
1940 1963 trindex = len(self) - 1
1941 1964 dataoff = self.end(tiprev)
1942 1965
1943 1966 tr.add(self.datafile, dataoff)
1944 1967
1945 1968 if fp:
1946 1969 fp.flush()
1947 1970 fp.close()
1948 1971 # We can't use the cached file handle after close(). So prevent
1949 1972 # its usage.
1950 1973 self._writinghandles = None
1951 1974
1952 1975 with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
1953 1976 for r in self:
1954 1977 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
1955 1978
1956 1979 with self._indexfp(b'w') as fp:
1957 1980 self.version &= ~FLAG_INLINE_DATA
1958 1981 self._inline = False
1959 1982 io = self._io
1960 1983 for i in self:
1961 1984 e = io.packentry(self.index[i], self.node, self.version, i)
1962 1985 fp.write(e)
1963 1986
1964 1987 # the temp file replaces the real index when we exit the context
1965 1988 # manager
1966 1989
1967 1990 tr.replace(self.indexfile, trindex * self._io.size)
1968 1991 nodemaputil.setup_persistent_nodemap(tr, self)
1969 1992 self._chunkclear()
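# Added note: this conversion is one-way. Revisions are streamed out of the
# inline index file into a fresh data file, the index entries are rewritten
# without the FLAG_INLINE_DATA bit, and the transaction's record for the
# index file is replaced so a rollback truncates at the correct offset.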
1970 1993
1971 1994 def _nodeduplicatecallback(self, transaction, node):
1972 1995 """called when trying to add a node already stored.
1973 1996 """
1974 1997
1975 1998 def addrevision(
1976 1999 self,
1977 2000 text,
1978 2001 transaction,
1979 2002 link,
1980 2003 p1,
1981 2004 p2,
1982 2005 cachedelta=None,
1983 2006 node=None,
1984 2007 flags=REVIDX_DEFAULT_FLAGS,
1985 2008 deltacomputer=None,
1986 2009 sidedata=None,
1987 2010 ):
1988 2011 """add a revision to the log
1989 2012
1990 2013 text - the revision data to add
1991 2014 transaction - the transaction object used for rollback
1992 2015 link - the linkrev data to add
1993 2016 p1, p2 - the parent nodeids of the revision
1994 2017 cachedelta - an optional precomputed delta
1995 2018 node - nodeid of revision; typically node is not specified, and it is
1996 2019 computed by default as hash(text, p1, p2), however subclasses might
1997 2020 use a different hashing method (and override checkhash() in such a case)
1998 2021 flags - the known flags to set on the revision
1999 2022 deltacomputer - an optional deltacomputer instance shared between
2000 2023 multiple calls
2001 2024 """
2002 2025 if link == nullrev:
2003 2026 raise error.RevlogError(
2004 2027 _(b"attempted to add linkrev -1 to %s") % self.indexfile
2005 2028 )
2006 2029
2007 2030 if sidedata is None:
2008 2031 sidedata = {}
2009 2032 flags = flags & ~REVIDX_SIDEDATA
2010 2033 elif not self.hassidedata:
2011 2034 raise error.ProgrammingError(
2012 2035 _(b"trying to add sidedata to a revlog who don't support them")
2013 2036 )
2014 2037 else:
2015 2038 flags |= REVIDX_SIDEDATA
2016 2039
2017 2040 if flags:
2018 2041 node = node or self.hash(text, p1, p2)
2019 2042
2020 2043 rawtext, validatehash = flagutil.processflagswrite(
2021 2044 self, text, flags, sidedata=sidedata
2022 2045 )
2023 2046
2024 2047 # If the flag processor modifies the revision data, ignore any provided
2025 2048 # cachedelta.
2026 2049 if rawtext != text:
2027 2050 cachedelta = None
2028 2051
2029 2052 if len(rawtext) > _maxentrysize:
2030 2053 raise error.RevlogError(
2031 2054 _(
2032 2055 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2033 2056 )
2034 2057 % (self.indexfile, len(rawtext))
2035 2058 )
2036 2059
2037 2060 node = node or self.hash(rawtext, p1, p2)
2038 2061 if self.index.has_node(node):
2039 2062 return node
2040 2063
2041 2064 if validatehash:
2042 2065 self.checkhash(rawtext, node, p1=p1, p2=p2)
2043 2066
2044 2067 return self.addrawrevision(
2045 2068 rawtext,
2046 2069 transaction,
2047 2070 link,
2048 2071 p1,
2049 2072 p2,
2050 2073 node,
2051 2074 flags,
2052 2075 cachedelta=cachedelta,
2053 2076 deltacomputer=deltacomputer,
2054 2077 )
2055 2078
2056 2079 def addrawrevision(
2057 2080 self,
2058 2081 rawtext,
2059 2082 transaction,
2060 2083 link,
2061 2084 p1,
2062 2085 p2,
2063 2086 node,
2064 2087 flags,
2065 2088 cachedelta=None,
2066 2089 deltacomputer=None,
2067 2090 ):
2068 2091 """add a raw revision with known flags, node and parents
2069 2092 useful when reusing a revision not stored in this revlog (ex: received
2070 2093 over wire, or read from an external bundle).
2071 2094 """
2072 2095 dfh = None
2073 2096 if not self._inline:
2074 2097 dfh = self._datafp(b"a+")
2075 2098 ifh = self._indexfp(b"a+")
2076 2099 try:
2077 2100 return self._addrevision(
2078 2101 node,
2079 2102 rawtext,
2080 2103 transaction,
2081 2104 link,
2082 2105 p1,
2083 2106 p2,
2084 2107 flags,
2085 2108 cachedelta,
2086 2109 ifh,
2087 2110 dfh,
2088 2111 deltacomputer=deltacomputer,
2089 2112 )
2090 2113 finally:
2091 2114 if dfh:
2092 2115 dfh.close()
2093 2116 ifh.close()
2094 2117
2095 2118 def compress(self, data):
2096 2119 """Generate a possibly-compressed representation of data."""
2097 2120 if not data:
2098 2121 return b'', data
2099 2122
2100 2123 compressed = self._compressor.compress(data)
2101 2124
2102 2125 if compressed:
2103 2126 # The revlog compressor added the header in the returned data.
2104 2127 return b'', compressed
2105 2128
2106 2129 if data[0:1] == b'\0':
2107 2130 return b'', data
2108 2131 return b'u', data
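# Added note on the header convention above: the first element of the
# returned pair is a marker prepended to the stored chunk. Engine-compressed
# output already starts with its own header byte (e.g. b'x' for zlib), data
# beginning with a NUL byte is stored verbatim under an empty marker, and
# anything else is stored uncompressed behind b'u' so decompress() can route
# it correctly.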
2109 2132
2110 2133 def decompress(self, data):
2111 2134 """Decompress a revlog chunk.
2112 2135
2113 2136 The chunk is expected to begin with a header identifying the
2114 2137 format type so it can be routed to an appropriate decompressor.
2115 2138 """
2116 2139 if not data:
2117 2140 return data
2118 2141
2119 2142 # Revlogs are read much more frequently than they are written and many
2120 2143 # chunks only take microseconds to decompress, so performance is
2121 2144 # important here.
2122 2145 #
2123 2146 # We can make a few assumptions about revlogs:
2124 2147 #
2125 2148 # 1) the majority of chunks will be compressed (as opposed to inline
2126 2149 # raw data).
2127 2150 # 2) decompressing *any* data will likely be at least 10x slower than
2128 2151 # returning raw inline data.
2129 2152 # 3) we want to prioritize common and officially supported compression
2130 2153 # engines
2131 2154 #
2132 2155 # It follows that we want to optimize for "decompress compressed data
2133 2156 # when encoded with common and officially supported compression engines"
2134 2157 # case over "raw data" and "data encoded by less common or non-official
2135 2158 # compression engines." That is why we have the inline lookup first
2136 2159 # followed by the compengines lookup.
2137 2160 #
2138 2161 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2139 2162 # compressed chunks. And this matters for changelog and manifest reads.
2140 2163 t = data[0:1]
2141 2164
2142 2165 if t == b'x':
2143 2166 try:
2144 2167 return _zlibdecompress(data)
2145 2168 except zlib.error as e:
2146 2169 raise error.RevlogError(
2147 2170 _(b'revlog decompress error: %s')
2148 2171 % stringutil.forcebytestr(e)
2149 2172 )
2150 2173 # '\0' is more common than 'u' so it goes first.
2151 2174 elif t == b'\0':
2152 2175 return data
2153 2176 elif t == b'u':
2154 2177 return util.buffer(data, 1)
2155 2178
2156 2179 try:
2157 2180 compressor = self._decompressors[t]
2158 2181 except KeyError:
2159 2182 try:
2160 2183 engine = util.compengines.forrevlogheader(t)
2161 2184 compressor = engine.revlogcompressor(self._compengineopts)
2162 2185 self._decompressors[t] = compressor
2163 2186 except KeyError:
2164 2187 raise error.RevlogError(_(b'unknown compression type %r') % t)
2165 2188
2166 2189 return compressor.decompress(data)
2167 2190
2168 2191 def _addrevision(
2169 2192 self,
2170 2193 node,
2171 2194 rawtext,
2172 2195 transaction,
2173 2196 link,
2174 2197 p1,
2175 2198 p2,
2176 2199 flags,
2177 2200 cachedelta,
2178 2201 ifh,
2179 2202 dfh,
2180 2203 alwayscache=False,
2181 2204 deltacomputer=None,
2182 2205 ):
2183 2206 """internal function to add revisions to the log
2184 2207
2185 2208 see addrevision for argument descriptions.
2186 2209
2187 2210 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2188 2211
2189 2212 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2190 2213 be used.
2191 2214
2192 2215 invariants:
2193 2216 - rawtext is optional (can be None); if not set, cachedelta must be set.
2194 2217 if both are set, they must correspond to each other.
2195 2218 """
2196 2219 if node == nullid:
2197 2220 raise error.RevlogError(
2198 2221 _(b"%s: attempt to add null revision") % self.indexfile
2199 2222 )
2200 2223 if node == wdirid or node in wdirfilenodeids:
2201 2224 raise error.RevlogError(
2202 2225 _(b"%s: attempt to add wdir revision") % self.indexfile
2203 2226 )
2204 2227
2205 2228 if self._inline:
2206 2229 fh = ifh
2207 2230 else:
2208 2231 fh = dfh
2209 2232
2210 2233 btext = [rawtext]
2211 2234
2212 2235 curr = len(self)
2213 2236 prev = curr - 1
2214 2237 offset = self.end(prev)
2215 2238 p1r, p2r = self.rev(p1), self.rev(p2)
2216 2239
2217 2240 # full versions are inserted when the needed deltas
2218 2241 # become comparable to the uncompressed text
2219 2242 if rawtext is None:
2220 2243 # we need the rawtext size before it was changed by flag processors,
2221 2244 # which is the non-raw size. use revlog explicitly to avoid filelog's
2222 2245 # extra logic that might remove metadata size.
2223 2246 textlen = mdiff.patchedsize(
2224 2247 revlog.size(self, cachedelta[0]), cachedelta[1]
2225 2248 )
2226 2249 else:
2227 2250 textlen = len(rawtext)
2228 2251
2229 2252 if deltacomputer is None:
2230 2253 deltacomputer = deltautil.deltacomputer(self)
2231 2254
2232 2255 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2233 2256
2234 2257 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2235 2258
2236 2259 e = (
2237 2260 offset_type(offset, flags),
2238 2261 deltainfo.deltalen,
2239 2262 textlen,
2240 2263 deltainfo.base,
2241 2264 link,
2242 2265 p1r,
2243 2266 p2r,
2244 2267 node,
2245 2268 )
2246 2269 self.index.append(e)
2247 2270
2248 2271 entry = self._io.packentry(e, self.node, self.version, curr)
2249 2272 self._writeentry(
2250 2273 transaction, ifh, dfh, entry, deltainfo.data, link, offset
2251 2274 )
2252 2275
2253 2276 rawtext = btext[0]
2254 2277
2255 2278 if alwayscache and rawtext is None:
2256 2279 rawtext = deltacomputer.buildtext(revinfo, fh)
2257 2280
2258 2281 if type(rawtext) == bytes: # only accept immutable objects
2259 2282 self._revisioncache = (node, curr, rawtext)
2260 2283 self._chainbasecache[curr] = deltainfo.chainbase
2261 2284 return node
2262 2285
2263 2286 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2264 2287 # Files opened in a+ mode have inconsistent behavior on various
2265 2288 # platforms. Windows requires that a file positioning call be made
2266 2289 # when the file handle transitions between reads and writes. See
2267 2290 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2268 2291 # platforms, Python or the platform itself can be buggy. Some versions
2269 2292 # of Solaris have been observed to not append at the end of the file
2270 2293 # if the file was seeked to before the end. See issue4943 for more.
2271 2294 #
2272 2295 # We work around this issue by inserting a seek() before writing.
2273 2296 # Note: This is likely not necessary on Python 3. However, because
2274 2297 # the file handle is reused for reads and may be seeked there, we need
2275 2298 # to be careful before changing this.
2276 2299 ifh.seek(0, os.SEEK_END)
2277 2300 if dfh:
2278 2301 dfh.seek(0, os.SEEK_END)
2279 2302
2280 2303 curr = len(self) - 1
2281 2304 if not self._inline:
2282 2305 transaction.add(self.datafile, offset)
2283 2306 transaction.add(self.indexfile, curr * len(entry))
2284 2307 if data[0]:
2285 2308 dfh.write(data[0])
2286 2309 dfh.write(data[1])
2287 2310 ifh.write(entry)
2288 2311 else:
2289 2312 offset += curr * self._io.size
2290 2313 transaction.add(self.indexfile, offset, curr)
2291 2314 ifh.write(entry)
2292 2315 ifh.write(data[0])
2293 2316 ifh.write(data[1])
2294 2317 self._enforceinlinesize(transaction, ifh)
2295 2318 nodemaputil.setup_persistent_nodemap(transaction, self)
2296 2319
2297 2320 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2298 2321 """
2299 2322 add a delta group
2300 2323
2301 2324 given a set of deltas, add them to the revision log. the
2302 2325 first delta is against its parent, which should be in our
2303 2326 log, the rest are against the previous delta.
2304 2327
2305 2328 If ``addrevisioncb`` is defined, it will be called with arguments of
2306 2329 this revlog and the node that was added.
2307 2330 """
2308 2331
2309 2332 if self._writinghandles:
2310 2333 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2311 2334
2312 2335 nodes = []
2313 2336
2314 2337 r = len(self)
2315 2338 end = 0
2316 2339 if r:
2317 2340 end = self.end(r - 1)
2318 2341 ifh = self._indexfp(b"a+")
2319 2342 isize = r * self._io.size
2320 2343 if self._inline:
2321 2344 transaction.add(self.indexfile, end + isize, r)
2322 2345 dfh = None
2323 2346 else:
2324 2347 transaction.add(self.indexfile, isize, r)
2325 2348 transaction.add(self.datafile, end)
2326 2349 dfh = self._datafp(b"a+")
2327 2350
2328 2351 def flush():
2329 2352 if dfh:
2330 2353 dfh.flush()
2331 2354 ifh.flush()
2332 2355
2333 2356 self._writinghandles = (ifh, dfh)
2334 2357
2335 2358 try:
2336 2359 deltacomputer = deltautil.deltacomputer(self)
2337 2360 # loop through our set of deltas
2338 2361 for data in deltas:
2339 2362 node, p1, p2, linknode, deltabase, delta, flags = data
2340 2363 link = linkmapper(linknode)
2341 2364 flags = flags or REVIDX_DEFAULT_FLAGS
2342 2365
2343 2366 nodes.append(node)
2344 2367
2345 2368 if self.index.has_node(node):
2346 2369 self._nodeduplicatecallback(transaction, node)
2347 2370 # this can happen if two branches make the same change
2348 2371 continue
2349 2372
2350 2373 for p in (p1, p2):
2351 2374 if not self.index.has_node(p):
2352 2375 raise error.LookupError(
2353 2376 p, self.indexfile, _(b'unknown parent')
2354 2377 )
2355 2378
2356 2379 if not self.index.has_node(deltabase):
2357 2380 raise error.LookupError(
2358 2381 deltabase, self.indexfile, _(b'unknown delta base')
2359 2382 )
2360 2383
2361 2384 baserev = self.rev(deltabase)
2362 2385
2363 2386 if baserev != nullrev and self.iscensored(baserev):
2364 2387 # if base is censored, delta must be full replacement in a
2365 2388 # single patch operation
2366 2389 hlen = struct.calcsize(b">lll")
2367 2390 oldlen = self.rawsize(baserev)
2368 2391 newlen = len(delta) - hlen
2369 2392 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2370 2393 raise error.CensoredBaseError(
2371 2394 self.indexfile, self.node(baserev)
2372 2395 )
2373 2396
2374 2397 if not flags and self._peek_iscensored(baserev, delta, flush):
2375 2398 flags |= REVIDX_ISCENSORED
2376 2399
2377 2400 # We assume consumers of addrevisioncb will want to retrieve
2378 2401 # the added revision, which will require a call to
2379 2402 # revision(). revision() will fast path if there is a cache
2380 2403 # hit. So, we tell _addrevision() to always cache in this case.
2381 2404 # We're only using addgroup() in the context of changegroup
2382 2405 # generation so the revision data can always be handled as raw
2383 2406 # by the flagprocessor.
2384 2407 self._addrevision(
2385 2408 node,
2386 2409 None,
2387 2410 transaction,
2388 2411 link,
2389 2412 p1,
2390 2413 p2,
2391 2414 flags,
2392 2415 (baserev, delta),
2393 2416 ifh,
2394 2417 dfh,
2395 2418 alwayscache=bool(addrevisioncb),
2396 2419 deltacomputer=deltacomputer,
2397 2420 )
2398 2421
2399 2422 if addrevisioncb:
2400 2423 addrevisioncb(self, node)
2401 2424
2402 2425 if not dfh and not self._inline:
2403 2426 # addrevision switched from inline to conventional
2404 2427 # reopen the index
2405 2428 ifh.close()
2406 2429 dfh = self._datafp(b"a+")
2407 2430 ifh = self._indexfp(b"a+")
2408 2431 self._writinghandles = (ifh, dfh)
2409 2432 finally:
2410 2433 self._writinghandles = None
2411 2434
2412 2435 if dfh:
2413 2436 dfh.close()
2414 2437 ifh.close()
2415 2438
2416 2439 return nodes
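# Usage sketch (added note; names are illustrative): addgroup() is driven by
# changegroup application, roughly:
#
#   def linkmapper(linknode):
#       return cl.rev(linknode)   # map a changelog node to the local linkrev
#
#   added = rl.addgroup(deltas, linkmapper, transaction)
#   # `deltas` yields (node, p1, p2, linknode, deltabase, delta, flags)
#   # tuples; `added` lists every node seen, including pre-existing ones.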
2417 2440
2418 2441 def iscensored(self, rev):
2419 2442 """Check if a file revision is censored."""
2420 2443 if not self._censorable:
2421 2444 return False
2422 2445
2423 2446 return self.flags(rev) & REVIDX_ISCENSORED
2424 2447
2425 2448 def _peek_iscensored(self, baserev, delta, flush):
2426 2449 """Quickly check if a delta produces a censored revision."""
2427 2450 if not self._censorable:
2428 2451 return False
2429 2452
2430 2453 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2431 2454
2432 2455 def getstrippoint(self, minlink):
2433 2456 """find the minimum rev that must be stripped to strip the linkrev
2434 2457
2435 2458 Returns a tuple containing the minimum rev and a set of all revs that
2436 2459 have linkrevs that will be broken by this strip.
2437 2460 """
2438 2461 return storageutil.resolvestripinfo(
2439 2462 minlink,
2440 2463 len(self) - 1,
2441 2464 self.headrevs(),
2442 2465 self.linkrev,
2443 2466 self.parentrevs,
2444 2467 )
2445 2468
2446 2469 def strip(self, minlink, transaction):
2447 2470 """truncate the revlog on the first revision with a linkrev >= minlink
2448 2471
2449 2472 This function is called when we're stripping revision minlink and
2450 2473 its descendants from the repository.
2451 2474
2452 2475 We have to remove all revisions with linkrev >= minlink, because
2453 2476 the equivalent changelog revisions will be renumbered after the
2454 2477 strip.
2455 2478
2456 2479 So we truncate the revlog on the first of these revisions, and
2457 2480 trust that the caller has saved the revisions that shouldn't be
2458 2481 removed and that it'll re-add them after this truncation.
2459 2482 """
2460 2483 if len(self) == 0:
2461 2484 return
2462 2485
2463 2486 rev, _ = self.getstrippoint(minlink)
2464 2487 if rev == len(self):
2465 2488 return
2466 2489
2467 2490 # first truncate the files on disk
2468 2491 end = self.start(rev)
2469 2492 if not self._inline:
2470 2493 transaction.add(self.datafile, end)
2471 2494 end = rev * self._io.size
2472 2495 else:
2473 2496 end += rev * self._io.size
2474 2497
2475 2498 transaction.add(self.indexfile, end)
2476 2499
2477 2500 # then reset internal state in memory to forget those revisions
2478 2501 self._revisioncache = None
2479 2502 self._chaininfocache = {}
2480 2503 self._chunkclear()
2481 2504
2482 2505 del self.index[rev:-1]
2483 2506
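A minimal usage sketch of the strip machinery above; everything except the revlog methods themselves is an assumption (`rl` is a revlog, `tr` an open transaction):

    # Illustrative sketch only, not part of this changeset.
    minlink = 42                                  # assumed: first changelog rev being stripped
    rev, brokenrevs = rl.getstrippoint(minlink)
    if rev < len(rl):
        # brokenrevs holds revs whose linkrevs the strip will invalidate;
        # the caller is expected to have saved whatever must be re-added.
        rl.strip(minlink, tr)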
2484 2507 def checksize(self):
2485 2508 """Check size of index and data files
2486 2509
2487 2510 return a (dd, di) tuple.
2488 2511 - dd: extra bytes for the "data" file
2489 2512 - di: extra bytes for the "index" file
2490 2513
2491 2514 A healthy revlog will return (0, 0).
2492 2515 """
2493 2516 expected = 0
2494 2517 if len(self):
2495 2518 expected = max(0, self.end(len(self) - 1))
2496 2519
2497 2520 try:
2498 2521 with self._datafp() as f:
2499 2522 f.seek(0, io.SEEK_END)
2500 2523 actual = f.tell()
2501 2524 dd = actual - expected
2502 2525 except IOError as inst:
2503 2526 if inst.errno != errno.ENOENT:
2504 2527 raise
2505 2528 dd = 0
2506 2529
2507 2530 try:
2508 2531 f = self.opener(self.indexfile)
2509 2532 f.seek(0, io.SEEK_END)
2510 2533 actual = f.tell()
2511 2534 f.close()
2512 2535 s = self._io.size
2513 2536 i = max(0, actual // s)
2514 2537 di = actual - (i * s)
2515 2538 if self._inline:
2516 2539 databytes = 0
2517 2540 for r in self:
2518 2541 databytes += max(0, self.length(r))
2519 2542 dd = 0
2520 2543 di = actual - len(self) * s - databytes
2521 2544 except IOError as inst:
2522 2545 if inst.errno != errno.ENOENT:
2523 2546 raise
2524 2547 di = 0
2525 2548
2526 2549 return (dd, di)
2527 2550
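A hedged sketch of how a caller might interpret the (dd, di) tuple; `rl` and `report` are assumed names:

    dd, di = rl.checksize()
    if dd:
        report(b'data length off by %d bytes' % dd)    # extra/missing data-file bytes
    if di:
        report(b'index contains %d extra bytes' % di)  # trailing index-file bytes
    # a healthy revlog yields (0, 0)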
2528 2551 def files(self):
2529 2552 res = [self.indexfile]
2530 2553 if not self._inline:
2531 2554 res.append(self.datafile)
2532 2555 return res
2533 2556
2534 2557 def emitrevisions(
2535 2558 self,
2536 2559 nodes,
2537 2560 nodesorder=None,
2538 2561 revisiondata=False,
2539 2562 assumehaveparentrevisions=False,
2540 2563 deltamode=repository.CG_DELTAMODE_STD,
2541 2564 ):
2542 2565 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2543 2566 raise error.ProgrammingError(
2544 2567 b'unhandled value for nodesorder: %s' % nodesorder
2545 2568 )
2546 2569
2547 2570 if nodesorder is None and not self._generaldelta:
2548 2571 nodesorder = b'storage'
2549 2572
2550 2573 if (
2551 2574 not self._storedeltachains
2552 2575 and deltamode != repository.CG_DELTAMODE_PREV
2553 2576 ):
2554 2577 deltamode = repository.CG_DELTAMODE_FULL
2555 2578
2556 2579 return storageutil.emitrevisions(
2557 2580 self,
2558 2581 nodes,
2559 2582 nodesorder,
2560 2583 revlogrevisiondelta,
2561 2584 deltaparentfn=self.deltaparent,
2562 2585 candeltafn=self.candelta,
2563 2586 rawsizefn=self.rawsize,
2564 2587 revdifffn=self.revdiff,
2565 2588 flagsfn=self.flags,
2566 2589 deltamode=deltamode,
2567 2590 revisiondata=revisiondata,
2568 2591 assumehaveparentrevisions=assumehaveparentrevisions,
2569 2592 )
2570 2593
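A hedged usage sketch of emitrevisions(); `rl`, `nodes` and the consumer functions are assumptions, and the yielded objects are expected to follow the irevisiondelta shape used elsewhere in this module:

    for rev_delta in rl.emitrevisions(nodes, revisiondata=True):
        if rev_delta.delta is not None:
            # delta against rev_delta.basenode
            apply_delta(rev_delta.node, rev_delta.basenode, rev_delta.delta)
        else:
            # full text was emitted instead of a delta
            store_fulltext(rev_delta.node, rev_delta.revision)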
2571 2594 DELTAREUSEALWAYS = b'always'
2572 2595 DELTAREUSESAMEREVS = b'samerevs'
2573 2596 DELTAREUSENEVER = b'never'
2574 2597
2575 2598 DELTAREUSEFULLADD = b'fulladd'
2576 2599
2577 2600 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2578 2601
2579 2602 def clone(
2580 2603 self,
2581 2604 tr,
2582 2605 destrevlog,
2583 2606 addrevisioncb=None,
2584 2607 deltareuse=DELTAREUSESAMEREVS,
2585 2608 forcedeltabothparents=None,
2586 2609 sidedatacompanion=None,
2587 2610 ):
2588 2611 """Copy this revlog to another, possibly with format changes.
2589 2612
2590 2613 The destination revlog will contain the same revisions and nodes.
2591 2614 However, it may not be bit-for-bit identical due to e.g. delta encoding
2592 2615 differences.
2593 2616
2594 2617 The ``deltareuse`` argument controls how deltas from the existing revlog
2595 2618 are preserved in the destination revlog. The argument can have the
2596 2619 following values:
2597 2620
2598 2621 DELTAREUSEALWAYS
2599 2622 Deltas will always be reused (if possible), even if the destination
2600 2623 revlog would not select the same revisions for the delta. This is the
2601 2624 fastest mode of operation.
2602 2625 DELTAREUSESAMEREVS
2603 2626 Deltas will be reused if the destination revlog would pick the same
2604 2627 revisions for the delta. This mode strikes a balance between speed
2605 2628 and optimization.
2606 2629 DELTAREUSENEVER
2607 2630 Deltas will never be reused. This is the slowest mode of execution.
2608 2631 This mode can be used to recompute deltas (e.g. if the diff/delta
2609 2632 algorithm changes).
2610 2633 DELTAREUSEFULLADD
2611 2634 Revisions will be re-added as if they were new content. This is
2612 2635 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2613 2636 e.g. large file detection and handling.
2614 2637
2615 2638 Delta computation can be slow, so the choice of delta reuse policy can
2616 2639 significantly affect run time.
2617 2640
2618 2641 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2619 2642 two extremes. Deltas will be reused if they are appropriate. But if the
2620 2643 delta could choose a better revision, it will do so. This means if you
2621 2644 are converting a non-generaldelta revlog to a generaldelta revlog,
2622 2645 deltas will be recomputed if the delta's parent isn't a parent of the
2623 2646 revision.
2624 2647
2625 2648 In addition to the delta policy, the ``forcedeltabothparents``
2626 2649 argument controls whether to force computing deltas against both parents
2627 2650 for merges. When not set, the destination revlog's existing setting is kept.
2628 2651
2629 2652 If not None, `sidedatacompanion` is a callable that accepts two
2630 2653 arguments:
2631 2654
2632 2655 (srcrevlog, rev)
2633 2656
2634 2657 and returns a triplet that controls changes to sidedata content from the
2635 2658 old revision to the new clone result:
2636 2659
2637 2660 (dropall, filterout, update)
2638 2661
2639 2662 * if `dropall` is True, all sidedata should be dropped
2640 2663 * `filterout` is a set of sidedata keys that should be dropped
2641 2664 * `update` is a mapping of additional/new key -> value
2642 2665 """
2643 2666 if deltareuse not in self.DELTAREUSEALL:
2644 2667 raise ValueError(
2645 2668 _(b'value for deltareuse invalid: %s') % deltareuse
2646 2669 )
2647 2670
2648 2671 if len(destrevlog):
2649 2672 raise ValueError(_(b'destination revlog is not empty'))
2650 2673
2651 2674 if getattr(self, 'filteredrevs', None):
2652 2675 raise ValueError(_(b'source revlog has filtered revisions'))
2653 2676 if getattr(destrevlog, 'filteredrevs', None):
2654 2677 raise ValueError(_(b'destination revlog has filtered revisions'))
2655 2678
2656 2679 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2657 2680 # if possible.
2658 2681 oldlazydelta = destrevlog._lazydelta
2659 2682 oldlazydeltabase = destrevlog._lazydeltabase
2660 2683 oldamd = destrevlog._deltabothparents
2661 2684
2662 2685 try:
2663 2686 if deltareuse == self.DELTAREUSEALWAYS:
2664 2687 destrevlog._lazydeltabase = True
2665 2688 destrevlog._lazydelta = True
2666 2689 elif deltareuse == self.DELTAREUSESAMEREVS:
2667 2690 destrevlog._lazydeltabase = False
2668 2691 destrevlog._lazydelta = True
2669 2692 elif deltareuse == self.DELTAREUSENEVER:
2670 2693 destrevlog._lazydeltabase = False
2671 2694 destrevlog._lazydelta = False
2672 2695
2673 2696 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2674 2697
2675 2698 self._clone(
2676 2699 tr,
2677 2700 destrevlog,
2678 2701 addrevisioncb,
2679 2702 deltareuse,
2680 2703 forcedeltabothparents,
2681 2704 sidedatacompanion,
2682 2705 )
2683 2706
2684 2707 finally:
2685 2708 destrevlog._lazydelta = oldlazydelta
2686 2709 destrevlog._lazydeltabase = oldlazydeltabase
2687 2710 destrevlog._deltabothparents = oldamd
2688 2711
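To illustrate the sidedatacompanion contract documented in the clone() docstring, here is a minimal hypothetical companion and a hedged clone call; the sidedata key and the surrounding names (`srcrevlog`, `destrevlog`, `tr`) are assumptions:

    def drop_one_key(srcrevlog, rev):
        # (dropall, filterout, update): keep all sidedata except one assumed key
        return (False, {ASSUMED_SIDEDATA_KEY}, {})

    srcrevlog.clone(
        tr,                                    # assumed: an open transaction
        destrevlog,                            # assumed: an empty destination revlog
        deltareuse=revlog.DELTAREUSESAMEREVS,
        sidedatacompanion=drop_one_key,
    )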
2689 2712 def _clone(
2690 2713 self,
2691 2714 tr,
2692 2715 destrevlog,
2693 2716 addrevisioncb,
2694 2717 deltareuse,
2695 2718 forcedeltabothparents,
2696 2719 sidedatacompanion,
2697 2720 ):
2698 2721 """perform the core duty of `revlog.clone` after parameter processing"""
2699 2722 deltacomputer = deltautil.deltacomputer(destrevlog)
2700 2723 index = self.index
2701 2724 for rev in self:
2702 2725 entry = index[rev]
2703 2726
2704 2727 # Some classes override linkrev to take filtered revs into
2705 2728 # account. Use raw entry from index.
2706 2729 flags = entry[0] & 0xFFFF
2707 2730 linkrev = entry[4]
2708 2731 p1 = index[entry[5]][7]
2709 2732 p2 = index[entry[6]][7]
2710 2733 node = entry[7]
2711 2734
2712 2735 sidedataactions = (False, [], {})
2713 2736 if sidedatacompanion is not None:
2714 2737 sidedataactions = sidedatacompanion(self, rev)
2715 2738
2716 2739 # (Possibly) reuse the delta from the revlog if allowed and
2717 2740 # the revlog chunk is a delta.
2718 2741 cachedelta = None
2719 2742 rawtext = None
2720 2743 if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
2721 2744 dropall, filterout, update = sidedataactions
2722 2745 text, sidedata = self._revisiondata(rev)
2723 2746 if dropall:
2724 2747 sidedata = {}
2725 2748 for key in filterout:
2726 2749 sidedata.pop(key, None)
2727 2750 sidedata.update(update)
2728 2751 if not sidedata:
2729 2752 sidedata = None
2730 2753 destrevlog.addrevision(
2731 2754 text,
2732 2755 tr,
2733 2756 linkrev,
2734 2757 p1,
2735 2758 p2,
2736 2759 cachedelta=cachedelta,
2737 2760 node=node,
2738 2761 flags=flags,
2739 2762 deltacomputer=deltacomputer,
2740 2763 sidedata=sidedata,
2741 2764 )
2742 2765 else:
2743 2766 if destrevlog._lazydelta:
2744 2767 dp = self.deltaparent(rev)
2745 2768 if dp != nullrev:
2746 2769 cachedelta = (dp, bytes(self._chunk(rev)))
2747 2770
2748 2771 if not cachedelta:
2749 2772 rawtext = self.rawdata(rev)
2750 2773
2751 2774 ifh = destrevlog.opener(
2752 2775 destrevlog.indexfile, b'a+', checkambig=False
2753 2776 )
2754 2777 dfh = None
2755 2778 if not destrevlog._inline:
2756 2779 dfh = destrevlog.opener(destrevlog.datafile, b'a+')
2757 2780 try:
2758 2781 destrevlog._addrevision(
2759 2782 node,
2760 2783 rawtext,
2761 2784 tr,
2762 2785 linkrev,
2763 2786 p1,
2764 2787 p2,
2765 2788 flags,
2766 2789 cachedelta,
2767 2790 ifh,
2768 2791 dfh,
2769 2792 deltacomputer=deltacomputer,
2770 2793 )
2771 2794 finally:
2772 2795 if dfh:
2773 2796 dfh.close()
2774 2797 ifh.close()
2775 2798
2776 2799 if addrevisioncb:
2777 2800 addrevisioncb(self, rev, node)
2778 2801
2779 2802 def censorrevision(self, tr, censornode, tombstone=b''):
2780 2803 if (self.version & 0xFFFF) == REVLOGV0:
2781 2804 raise error.RevlogError(
2782 2805 _(b'cannot censor with version %d revlogs') % self.version
2783 2806 )
2784 2807
2785 2808 censorrev = self.rev(censornode)
2786 2809 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2787 2810
2788 2811 if len(tombstone) > self.rawsize(censorrev):
2789 2812 raise error.Abort(
2790 2813 _(b'censor tombstone must be no longer than censored data')
2791 2814 )
2792 2815
2793 2816 # Rewriting the revlog in place is hard. Our strategy for censoring is
2794 2817 # to create a new revlog, copy all revisions to it, then replace the
2795 2818 # revlogs on transaction close.
2796 2819
2797 2820 newindexfile = self.indexfile + b'.tmpcensored'
2798 2821 newdatafile = self.datafile + b'.tmpcensored'
2799 2822
2800 2823 # This is a bit dangerous. We could easily have a mismatch of state.
2801 2824 newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
2802 2825 newrl.version = self.version
2803 2826 newrl._generaldelta = self._generaldelta
2804 2827 newrl._io = self._io
2805 2828
2806 2829 for rev in self.revs():
2807 2830 node = self.node(rev)
2808 2831 p1, p2 = self.parents(node)
2809 2832
2810 2833 if rev == censorrev:
2811 2834 newrl.addrawrevision(
2812 2835 tombstone,
2813 2836 tr,
2814 2837 self.linkrev(censorrev),
2815 2838 p1,
2816 2839 p2,
2817 2840 censornode,
2818 2841 REVIDX_ISCENSORED,
2819 2842 )
2820 2843
2821 2844 if newrl.deltaparent(rev) != nullrev:
2822 2845 raise error.Abort(
2823 2846 _(
2824 2847 b'censored revision stored as delta; '
2825 2848 b'cannot censor'
2826 2849 ),
2827 2850 hint=_(
2828 2851 b'censoring of revlogs is not '
2829 2852 b'fully implemented; please report '
2830 2853 b'this bug'
2831 2854 ),
2832 2855 )
2833 2856 continue
2834 2857
2835 2858 if self.iscensored(rev):
2836 2859 if self.deltaparent(rev) != nullrev:
2837 2860 raise error.Abort(
2838 2861 _(
2839 2862 b'cannot censor due to censored '
2840 2863 b'revision having delta stored'
2841 2864 )
2842 2865 )
2843 2866 rawtext = self._chunk(rev)
2844 2867 else:
2845 2868 rawtext = self.rawdata(rev)
2846 2869
2847 2870 newrl.addrawrevision(
2848 2871 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
2849 2872 )
2850 2873
2851 2874 tr.addbackup(self.indexfile, location=b'store')
2852 2875 if not self._inline:
2853 2876 tr.addbackup(self.datafile, location=b'store')
2854 2877
2855 2878 self.opener.rename(newrl.indexfile, self.indexfile)
2856 2879 if not self._inline:
2857 2880 self.opener.rename(newrl.datafile, self.datafile)
2858 2881
2859 2882 self.clearcaches()
2860 2883 self._loadindex()
2861 2884
2862 2885 def verifyintegrity(self, state):
2863 2886 """Verifies the integrity of the revlog.
2864 2887
2865 2888 Yields ``revlogproblem`` instances describing problems that are
2866 2889 found.
2867 2890 """
2868 2891 dd, di = self.checksize()
2869 2892 if dd:
2870 2893 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
2871 2894 if di:
2872 2895 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
2873 2896
2874 2897 version = self.version & 0xFFFF
2875 2898
2876 2899 # The verifier tells us what version revlog we should be.
2877 2900 if version != state[b'expectedversion']:
2878 2901 yield revlogproblem(
2879 2902 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
2880 2903 % (self.indexfile, version, state[b'expectedversion'])
2881 2904 )
2882 2905
2883 2906 state[b'skipread'] = set()
2884 2907 state[b'safe_renamed'] = set()
2885 2908
2886 2909 for rev in self:
2887 2910 node = self.node(rev)
2888 2911
2889 2912 # Verify contents. 4 cases to care about:
2890 2913 #
2891 2914 # common: the most common case
2892 2915 # rename: with a rename
2893 2916 # meta: file content starts with b'\1\n', the metadata
2894 2917 # header defined in filelog.py, but without a rename
2895 2918 # ext: content stored externally
2896 2919 #
2897 2920 # More formally, their differences are shown below:
2898 2921 #
2899 2922 # | common | rename | meta | ext
2900 2923 # -------------------------------------------------------
2901 2924 # flags() | 0 | 0 | 0 | not 0
2902 2925 # renamed() | False | True | False | ?
2903 2926 # rawtext[0:2]=='\1\n'| False | True | True | ?
2904 2927 #
2905 2928 # "rawtext" means the raw text stored in revlog data, which
2906 2929 # could be retrieved by "rawdata(rev)". "text"
2907 2930 # mentioned below is "revision(rev)".
2908 2931 #
2909 2932 # There are 3 different lengths stored physically:
2910 2933 # 1. L1: rawsize, stored in revlog index
2911 2934 # 2. L2: len(rawtext), stored in revlog data
2912 2935 # 3. L3: len(text), stored in revlog data if flags==0, or
2913 2936 # possibly somewhere else if flags!=0
2914 2937 #
2915 2938 # L1 should be equal to L2. L3 could be different from them.
2916 2939 # "text" may or may not affect commit hash depending on flag
2917 2940 # processors (see flagutil.addflagprocessor).
2918 2941 #
2919 2942 # | common | rename | meta | ext
2920 2943 # -------------------------------------------------
2921 2944 # rawsize() | L1 | L1 | L1 | L1
2922 2945 # size() | L1 | L2-LM | L1(*) | L1 (?)
2923 2946 # len(rawtext) | L2 | L2 | L2 | L2
2924 2947 # len(text) | L2 | L2 | L2 | L3
2925 2948 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2926 2949 #
2927 2950 # LM: length of metadata, depending on rawtext
2928 2951 # (*): not ideal, see comment in filelog.size
2929 2952 # (?): could be "- len(meta)" if the resolved content has
2930 2953 # rename metadata
2931 2954 #
2932 2955 # Checks needed to be done:
2933 2956 # 1. length check: L1 == L2, in all cases.
2934 2957 # 2. hash check: depending on flag processor, we may need to
2935 2958 # use either "text" (external), or "rawtext" (in revlog).
2936 2959
2937 2960 try:
2938 2961 skipflags = state.get(b'skipflags', 0)
2939 2962 if skipflags:
2940 2963 skipflags &= self.flags(rev)
2941 2964
2942 2965 _verify_revision(self, skipflags, state, node)
2943 2966
2944 2967 l1 = self.rawsize(rev)
2945 2968 l2 = len(self.rawdata(node))
2946 2969
2947 2970 if l1 != l2:
2948 2971 yield revlogproblem(
2949 2972 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
2950 2973 node=node,
2951 2974 )
2952 2975
2953 2976 except error.CensoredNodeError:
2954 2977 if state[b'erroroncensored']:
2955 2978 yield revlogproblem(
2956 2979 error=_(b'censored file data'), node=node
2957 2980 )
2958 2981 state[b'skipread'].add(node)
2959 2982 except Exception as e:
2960 2983 yield revlogproblem(
2961 2984 error=_(b'unpacking %s: %s')
2962 2985 % (short(node), stringutil.forcebytestr(e)),
2963 2986 node=node,
2964 2987 )
2965 2988 state[b'skipread'].add(node)
2966 2989
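A hedged sketch of driving verifyintegrity(); the state keys mirror the ones read above, and `rl` and `report` are assumed names:

    state = {
        b'expectedversion': rl.version & 0xFFFF,
        b'erroroncensored': True,
    }
    for problem in rl.verifyintegrity(state):
        if problem.error:
            report(b'error: ' + problem.error)
        elif problem.warning:
            report(problem.warning)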
2967 2990 def storageinfo(
2968 2991 self,
2969 2992 exclusivefiles=False,
2970 2993 sharedfiles=False,
2971 2994 revisionscount=False,
2972 2995 trackedsize=False,
2973 2996 storedsize=False,
2974 2997 ):
2975 2998 d = {}
2976 2999
2977 3000 if exclusivefiles:
2978 3001 d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
2979 3002 if not self._inline:
2980 3003 d[b'exclusivefiles'].append((self.opener, self.datafile))
2981 3004
2982 3005 if sharedfiles:
2983 3006 d[b'sharedfiles'] = []
2984 3007
2985 3008 if revisionscount:
2986 3009 d[b'revisionscount'] = len(self)
2987 3010
2988 3011 if trackedsize:
2989 3012 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
2990 3013
2991 3014 if storedsize:
2992 3015 d[b'storedsize'] = sum(
2993 3016 self.opener.stat(path).st_size for path in self.files()
2994 3017 )
2995 3018
2996 3019 return d
@@ -1,48 +1,50 b''
1 1 ===================================
2 2 Test the persistent on-disk nodemap
3 3 ===================================
4 4
5 5
6 6 $ hg init test-repo
7 7 $ cd test-repo
8 8 $ cat << EOF >> .hg/hgrc
9 9 > [experimental]
10 10 > exp-persistent-nodemap=yes
11 > [devel]
12 > persistent-nodemap=yes
11 13 > EOF
12 14 $ hg debugbuilddag .+5000
13 15 $ f --size .hg/store/00changelog.n
14 16 .hg/store/00changelog.n: size=18
15 17 $ f --sha256 .hg/store/00changelog-*.nd
16 18 .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
17 19 $ hg debugnodemap --dump-new | f --sha256 --size
18 20 size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
19 21 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
20 22 size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
21 23 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
22 24 0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
23 25 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
24 26 0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
25 27 0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
26 28 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
27 29 0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
28 30 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
29 31 0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
30 32 0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
31 33 00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
32 34 00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
33 35 00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
34 36 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
35 37 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
36 38 00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
37 39
38 40 add a new commit
39 41
40 42 $ hg up
41 43 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 44 $ echo foo > foo
43 45 $ hg add foo
44 46 $ hg ci -m 'foo'
45 47 $ f --size .hg/store/00changelog.n
46 48 .hg/store/00changelog.n: size=18
47 49 $ f --sha256 .hg/store/00changelog-*.nd --size
48 50 .hg/store/00changelog-????????????????.nd: size=122880, sha256=bfafebd751c4f6d116a76a37a1dee2a251747affe7efbcc4f4842ccc746d4db9 (glob)