revlog: guarantee that p1 != null if a non-null parent exists...
Joerg Sonnenberger
r47922:49fd21f3 default
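As a quick illustration of the guarantee in the commit message, a minimal standalone sketch (normalized_parents is a made-up name, not part of the patch): whenever p1 is null, the parents are swapped, so a revision with exactly one non-null parent always reports it as p1.

    nullrev = -1

    def normalized_parents(p1, p2):
        # Mirror of the new parentrevs() logic below: if p1 is null,
        # swap so the non-null parent (if any) comes first.
        if p1 == nullrev:
            return p2, p1
        return p1, p2

    assert normalized_parents(nullrev, 5) == (5, nullrev)
    assert normalized_parents(5, nullrev) == (5, nullrev)
    assert normalized_parents(nullrev, nullrev) == (nullrev, nullrev)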
@@ -1,3258 +1,3264 @@
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import contextlib
18 18 import errno
19 19 import io
20 20 import os
21 21 import struct
22 22 import zlib
23 23
24 24 # import stuff from node for others to import from revlog
25 25 from .node import (
26 26 bin,
27 27 hex,
28 28 nullhex,
29 29 nullid,
30 30 nullrev,
31 31 short,
32 32 wdirfilenodeids,
33 33 wdirhex,
34 34 wdirid,
35 35 wdirrev,
36 36 )
37 37 from .i18n import _
38 38 from .pycompat import getattr
39 39 from .revlogutils.constants import (
40 40 FLAG_GENERALDELTA,
41 41 FLAG_INLINE_DATA,
42 42 REVLOGV0,
43 43 REVLOGV1,
44 44 REVLOGV1_FLAGS,
45 45 REVLOGV2,
46 46 REVLOGV2_FLAGS,
47 47 REVLOG_DEFAULT_FLAGS,
48 48 REVLOG_DEFAULT_FORMAT,
49 49 REVLOG_DEFAULT_VERSION,
50 50 )
51 51 from .revlogutils.flagutil import (
52 52 REVIDX_DEFAULT_FLAGS,
53 53 REVIDX_ELLIPSIS,
54 54 REVIDX_EXTSTORED,
55 55 REVIDX_FLAGS_ORDER,
56 56 REVIDX_HASCOPIESINFO,
57 57 REVIDX_ISCENSORED,
58 58 REVIDX_RAWTEXT_CHANGING_FLAGS,
59 59 REVIDX_SIDEDATA,
60 60 )
61 61 from .thirdparty import attr
62 62 from . import (
63 63 ancestor,
64 64 dagop,
65 65 error,
66 66 mdiff,
67 67 policy,
68 68 pycompat,
69 69 templatefilters,
70 70 util,
71 71 )
72 72 from .interfaces import (
73 73 repository,
74 74 util as interfaceutil,
75 75 )
76 76 from .revlogutils import (
77 77 deltas as deltautil,
78 78 flagutil,
79 79 nodemap as nodemaputil,
80 80 sidedata as sidedatautil,
81 81 )
82 82 from .utils import (
83 83 storageutil,
84 84 stringutil,
85 85 )
86 86 from .pure import parsers as pureparsers
87 87
88 88 # blanked usage of all the names to prevent pyflakes complaints
89 89 # We need these names available in the module for extensions.
90 90 REVLOGV0
91 91 REVLOGV1
92 92 REVLOGV2
93 93 FLAG_INLINE_DATA
94 94 FLAG_GENERALDELTA
95 95 REVLOG_DEFAULT_FLAGS
96 96 REVLOG_DEFAULT_FORMAT
97 97 REVLOG_DEFAULT_VERSION
98 98 REVLOGV1_FLAGS
99 99 REVLOGV2_FLAGS
100 100 REVIDX_ISCENSORED
101 101 REVIDX_ELLIPSIS
102 102 REVIDX_SIDEDATA
103 103 REVIDX_HASCOPIESINFO
104 104 REVIDX_EXTSTORED
105 105 REVIDX_DEFAULT_FLAGS
106 106 REVIDX_FLAGS_ORDER
107 107 REVIDX_RAWTEXT_CHANGING_FLAGS
108 108
109 109 parsers = policy.importmod('parsers')
110 110 rustancestor = policy.importrust('ancestor')
111 111 rustdagop = policy.importrust('dagop')
112 112 rustrevlog = policy.importrust('revlog')
113 113
114 114 # Aliased for performance.
115 115 _zlibdecompress = zlib.decompress
116 116
117 117 # max size of revlog with inline data
118 118 _maxinline = 131072
119 119 _chunksize = 1048576
120 120
121 121 # Flag processors for REVIDX_ELLIPSIS.
122 122 def ellipsisreadprocessor(rl, text):
123 123 return text, False
124 124
125 125
126 126 def ellipsiswriteprocessor(rl, text):
127 127 return text, False
128 128
129 129
130 130 def ellipsisrawprocessor(rl, text):
131 131 return False
132 132
133 133
134 134 ellipsisprocessor = (
135 135 ellipsisreadprocessor,
136 136 ellipsiswriteprocessor,
137 137 ellipsisrawprocessor,
138 138 )
139 139
140 140
141 141 def getoffset(q):
142 142 return int(q >> 16)
143 143
144 144
145 145 def gettype(q):
146 146 return int(q & 0xFFFF)
147 147
148 148
149 149 def offset_type(offset, type):
150 150 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
151 151 raise ValueError(b'unknown revlog index flags')
152 152 return int(int(offset) << 16 | type)
153 153
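For clarity, a small worked example of the packing these three helpers implement (flag value 0 is chosen so the REVIDX_KNOWN_FLAGS check passes): the first index field keeps the byte offset in the upper 48 bits and the storage flags in the low 16.

    packed = offset_type(1048576, 0)     # (1048576 << 16) | 0
    assert getoffset(packed) == 1048576  # upper bits: byte offset
    assert gettype(packed) == 0          # low 16 bits: storage flags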
154 154
155 155 def _verify_revision(rl, skipflags, state, node):
156 156 """Verify the integrity of the given revlog ``node`` while providing a hook
157 157 point for extensions to influence the operation."""
158 158 if skipflags:
159 159 state[b'skipread'].add(node)
160 160 else:
161 161 # Side-effect: read content and verify hash.
162 162 rl.revision(node)
163 163
164 164
165 165 # True if a fast implementation for persistent-nodemap is available
166 166 #
167 167 # We also consider that we have a "fast" implementation in "pure" python because
168 168 # people using pure don't really have performance considerations (and a
169 169 # wheelbarrow of other slowness sources)
170 170 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
171 171 parsers, 'BaseIndexObject'
172 172 )
173 173
174 174
175 175 @attr.s(slots=True, frozen=True)
176 176 class _revisioninfo(object):
177 177 """Information about a revision that allows building its fulltext
178 178 node: expected hash of the revision
179 179 p1, p2: parent revs of the revision
180 180 btext: built text cache consisting of a one-element list
181 181 cachedelta: (baserev, uncompressed_delta) or None
182 182 flags: flags associated with the revision storage
183 183
184 184 One of btext[0] or cachedelta must be set.
185 185 """
186 186
187 187 node = attr.ib()
188 188 p1 = attr.ib()
189 189 p2 = attr.ib()
190 190 btext = attr.ib()
191 191 textlen = attr.ib()
192 192 cachedelta = attr.ib()
193 193 flags = attr.ib()
194 194
195 195
196 196 @interfaceutil.implementer(repository.irevisiondelta)
197 197 @attr.s(slots=True)
198 198 class revlogrevisiondelta(object):
199 199 node = attr.ib()
200 200 p1node = attr.ib()
201 201 p2node = attr.ib()
202 202 basenode = attr.ib()
203 203 flags = attr.ib()
204 204 baserevisionsize = attr.ib()
205 205 revision = attr.ib()
206 206 delta = attr.ib()
207 207 sidedata = attr.ib()
208 208 linknode = attr.ib(default=None)
209 209
210 210
211 211 @interfaceutil.implementer(repository.iverifyproblem)
212 212 @attr.s(frozen=True)
213 213 class revlogproblem(object):
214 214 warning = attr.ib(default=None)
215 215 error = attr.ib(default=None)
216 216 node = attr.ib(default=None)
217 217
218 218
219 219 # index v0:
220 220 # 4 bytes: offset
221 221 # 4 bytes: compressed length
222 222 # 4 bytes: base rev
223 223 # 4 bytes: link rev
224 224 # 20 bytes: parent 1 nodeid
225 225 # 20 bytes: parent 2 nodeid
226 226 # 20 bytes: nodeid
227 227 indexformatv0 = struct.Struct(b">4l20s20s20s")
228 228 indexformatv0_pack = indexformatv0.pack
229 229 indexformatv0_unpack = indexformatv0.unpack
230 230
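As a sanity check on the layout described above, the v0 record size follows directly from the format string: four 4-byte big-endian integers plus three 20-byte SHA-1 node ids.

    assert indexformatv0.size == 4 * 4 + 3 * 20  # 76 bytes per entry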
231 231
232 232 class revlogoldindex(list):
233 233 @property
234 234 def nodemap(self):
235 235 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
236 236 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
237 237 return self._nodemap
238 238
239 239 @util.propertycache
240 240 def _nodemap(self):
241 241 nodemap = nodemaputil.NodeMap({nullid: nullrev})
242 242 for r in range(0, len(self)):
243 243 n = self[r][7]
244 244 nodemap[n] = r
245 245 return nodemap
246 246
247 247 def has_node(self, node):
248 248 """return True if the node exists in the index"""
249 249 return node in self._nodemap
250 250
251 251 def rev(self, node):
252 252 """return a revision for a node
253 253
254 254 If the node is unknown, raise a RevlogError"""
255 255 return self._nodemap[node]
256 256
257 257 def get_rev(self, node):
258 258 """return a revision for a node
259 259
260 260 If the node is unknown, return None"""
261 261 return self._nodemap.get(node)
262 262
263 263 def append(self, tup):
264 264 self._nodemap[tup[7]] = len(self)
265 265 super(revlogoldindex, self).append(tup)
266 266
267 267 def __delitem__(self, i):
268 268 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
269 269 raise ValueError(b"deleting slices only supports a:-1 with step 1")
270 270 for r in pycompat.xrange(i.start, len(self)):
271 271 del self._nodemap[self[r][7]]
272 272 super(revlogoldindex, self).__delitem__(i)
273 273
274 274 def clearcaches(self):
275 275 self.__dict__.pop('_nodemap', None)
276 276
277 277 def __getitem__(self, i):
278 278 if i == -1:
279 279 return (0, 0, 0, -1, -1, -1, -1, nullid)
280 280 return list.__getitem__(self, i)
281 281
282 282
283 283 class revlogoldio(object):
284 284 def __init__(self):
285 285 self.size = indexformatv0.size
286 286
287 287 def parseindex(self, data, inline):
288 288 s = self.size
289 289 index = []
290 290 nodemap = nodemaputil.NodeMap({nullid: nullrev})
291 291 n = off = 0
292 292 l = len(data)
293 293 while off + s <= l:
294 294 cur = data[off : off + s]
295 295 off += s
296 296 e = indexformatv0_unpack(cur)
297 297 # transform to revlogv1 format
298 298 e2 = (
299 299 offset_type(e[0], 0),
300 300 e[1],
301 301 -1,
302 302 e[2],
303 303 e[3],
304 304 nodemap.get(e[4], nullrev),
305 305 nodemap.get(e[5], nullrev),
306 306 e[6],
307 307 )
308 308 index.append(e2)
309 309 nodemap[e[6]] = n
310 310 n += 1
311 311
312 312 index = revlogoldindex(index)
313 313 return index, None
314 314
315 315 def packentry(self, entry, node, version, rev):
316 316 if gettype(entry[0]):
317 317 raise error.RevlogError(
318 318 _(b'index entry flags need revlog version 1')
319 319 )
320 320 e2 = (
321 321 getoffset(entry[0]),
322 322 entry[1],
323 323 entry[3],
324 324 entry[4],
325 325 node(entry[5]),
326 326 node(entry[6]),
327 327 entry[7],
328 328 )
329 329 return indexformatv0_pack(*e2)
330 330
331 331
332 332 # index ng:
333 333 # 6 bytes: offset
334 334 # 2 bytes: flags
335 335 # 4 bytes: compressed length
336 336 # 4 bytes: uncompressed length
337 337 # 4 bytes: base rev
338 338 # 4 bytes: link rev
339 339 # 4 bytes: parent 1 rev
340 340 # 4 bytes: parent 2 rev
341 341 # 32 bytes: nodeid
342 342 indexformatng = struct.Struct(b">Qiiiiii20s12x")
343 343 indexformatng_pack = indexformatng.pack
344 344 versionformat = struct.Struct(b">I")
345 345 versionformat_pack = versionformat.pack
346 346 versionformat_unpack = versionformat.unpack
347 347
348 348 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
349 349 # signed integer)
350 350 _maxentrysize = 0x7FFFFFFF
351 351
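Again as a sanity check, the ng record is a fixed 64 bytes: an 8-byte offset/flags word, six 4-byte integers, and a 20-byte node id padded with 12 bytes.

    assert indexformatng.size == 8 + 6 * 4 + 20 + 12  # 64 bytes per entry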
352 352
353 353 class revlogio(object):
354 354 def __init__(self):
355 355 self.size = indexformatng.size
356 356
357 357 def parseindex(self, data, inline):
358 358 # call the C implementation to parse the index data
359 359 index, cache = parsers.parse_index2(data, inline)
360 360 return index, cache
361 361
362 362 def packentry(self, entry, node, version, rev):
363 363 p = indexformatng_pack(*entry)
364 364 if rev == 0:
365 365 p = versionformat_pack(version) + p[4:]
366 366 return p
367 367
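To illustrate the rev == 0 special case in packentry, a sketch with a made-up entry tuple: the first 4 bytes of the very first record double as the revlog version header.

    entry = (offset_type(0, 0), 10, 12, 0, 0, -1, -1, b'\0' * 20)
    p = indexformatng_pack(*entry)
    p0 = versionformat_pack(REVLOGV1) + p[4:]  # as done when rev == 0
    assert len(p) == len(p0) == indexformatng.size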
368 368
369 369 indexformatv2 = struct.Struct(pureparsers.Index2Mixin.index_format)
370 370 indexformatv2_pack = indexformatv2.pack
371 371
372 372
373 373 class revlogv2io(object):
374 374 def __init__(self):
375 375 self.size = indexformatv2.size
376 376
377 377 def parseindex(self, data, inline):
378 378 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
379 379 return index, cache
380 380
381 381 def packentry(self, entry, node, version, rev):
382 382 p = indexformatv2_pack(*entry)
383 383 if rev == 0:
384 384 p = versionformat_pack(version) + p[4:]
385 385 return p
386 386
387 387
388 388 NodemapRevlogIO = None
389 389
390 390 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
391 391
392 392 class NodemapRevlogIO(revlogio):
393 393 """A debug-oriented IO class that returns a PersistentNodeMapIndexObject
394 394
395 395 The PersistentNodeMapIndexObject is meant to test the persistent nodemap feature.
396 396 """
397 397
398 398 def parseindex(self, data, inline):
399 399 index, cache = parsers.parse_index_devel_nodemap(data, inline)
400 400 return index, cache
401 401
402 402
403 403 class rustrevlogio(revlogio):
404 404 def parseindex(self, data, inline):
405 405 index, cache = super(rustrevlogio, self).parseindex(data, inline)
406 406 return rustrevlog.MixedIndex(index), cache
407 407
408 408
409 409 class revlog(object):
410 410 """
411 411 the underlying revision storage object
412 412
413 413 A revlog consists of two parts, an index and the revision data.
414 414
415 415 The index is a file with a fixed record size containing
416 416 information on each revision, including its nodeid (hash), the
417 417 nodeids of its parents, the position and offset of its data within
418 418 the data file, and the revision it's based on. Finally, each entry
419 419 contains a linkrev entry that can serve as a pointer to external
420 420 data.
421 421
422 422 The revision data itself is a linear collection of data chunks.
423 423 Each chunk represents a revision and is usually represented as a
424 424 delta against the previous chunk. To bound lookup time, runs of
425 425 deltas are limited to about 2 times the length of the original
426 426 version data. This makes retrieval of a version proportional to
427 427 its size, or O(1) relative to the number of revisions.
428 428
429 429 Both pieces of the revlog are written to in an append-only
430 430 fashion, which means we never need to rewrite a file to insert or
431 431 remove data, and can use some simple techniques to avoid the need
432 432 for locking while reading.
433 433
434 434 If checkambig, indexfile is opened with checkambig=True at
435 435 writing, to avoid file stat ambiguity.
436 436
437 437 If mmaplargeindex is True, and an mmapindexthreshold is set, the
438 438 index will be mmapped rather than read if it is larger than the
439 439 configured threshold.
440 440
441 441 If censorable is True, the revlog can have censored revisions.
442 442
443 443 If `upperboundcomp` is not None, this is the expected maximal gain from
444 444 compression for the data content.
445 445
446 446 `concurrencychecker` is an optional function that receives 3 arguments: a
447 447 file handle, a filename, and an expected position. It should check whether
448 448 the current position in the file handle is valid, and log/warn/fail (by
449 449 raising).
450 450 """
451 451
452 452 _flagserrorclass = error.RevlogError
453 453
454 454 def __init__(
455 455 self,
456 456 opener,
457 457 indexfile,
458 458 datafile=None,
459 459 checkambig=False,
460 460 mmaplargeindex=False,
461 461 censorable=False,
462 462 upperboundcomp=None,
463 463 persistentnodemap=False,
464 464 concurrencychecker=None,
465 465 ):
466 466 """
467 467 create a revlog object
468 468
469 469 opener is a function that abstracts the file opening operation
470 470 and can be used to implement COW semantics or the like.
471 471
472 472 """
473 473 self.upperboundcomp = upperboundcomp
474 474 self.indexfile = indexfile
475 475 self.datafile = datafile or (indexfile[:-2] + b".d")
476 476 self.nodemap_file = None
477 477 if persistentnodemap:
478 478 self.nodemap_file = nodemaputil.get_nodemap_file(
479 479 opener, self.indexfile
480 480 )
481 481
482 482 self.opener = opener
483 483 # When True, indexfile is opened with checkambig=True at writing, to
484 484 # avoid file stat ambiguity.
485 485 self._checkambig = checkambig
486 486 self._mmaplargeindex = mmaplargeindex
487 487 self._censorable = censorable
488 488 # 3-tuple of (node, rev, text) for a raw revision.
489 489 self._revisioncache = None
490 490 # Maps rev to chain base rev.
491 491 self._chainbasecache = util.lrucachedict(100)
492 492 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
493 493 self._chunkcache = (0, b'')
494 494 # How much data to read and cache into the raw revlog data cache.
495 495 self._chunkcachesize = 65536
496 496 self._maxchainlen = None
497 497 self._deltabothparents = True
498 498 self.index = None
499 499 self._nodemap_docket = None
500 500 # Mapping of partial identifiers to full nodes.
501 501 self._pcache = {}
502 502 # Mapping of revision integer to full node.
503 503 self._compengine = b'zlib'
504 504 self._compengineopts = {}
505 505 self._maxdeltachainspan = -1
506 506 self._withsparseread = False
507 507 self._sparserevlog = False
508 508 self._srdensitythreshold = 0.50
509 509 self._srmingapsize = 262144
510 510
511 511 # Make copy of flag processors so each revlog instance can support
512 512 # custom flags.
513 513 self._flagprocessors = dict(flagutil.flagprocessors)
514 514
515 515 # 2-tuple of file handles being used for active writing.
516 516 self._writinghandles = None
517 517
518 518 self._loadindex()
519 519
520 520 self._concurrencychecker = concurrencychecker
521 521
522 522 def _loadindex(self):
523 523 mmapindexthreshold = None
524 524 opts = self.opener.options
525 525
526 526 if b'revlogv2' in opts:
527 527 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
528 528 elif b'revlogv1' in opts:
529 529 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
530 530 if b'generaldelta' in opts:
531 531 newversionflags |= FLAG_GENERALDELTA
532 532 elif b'revlogv0' in self.opener.options:
533 533 newversionflags = REVLOGV0
534 534 else:
535 535 newversionflags = REVLOG_DEFAULT_VERSION
536 536
537 537 if b'chunkcachesize' in opts:
538 538 self._chunkcachesize = opts[b'chunkcachesize']
539 539 if b'maxchainlen' in opts:
540 540 self._maxchainlen = opts[b'maxchainlen']
541 541 if b'deltabothparents' in opts:
542 542 self._deltabothparents = opts[b'deltabothparents']
543 543 self._lazydelta = bool(opts.get(b'lazydelta', True))
544 544 self._lazydeltabase = False
545 545 if self._lazydelta:
546 546 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
547 547 if b'compengine' in opts:
548 548 self._compengine = opts[b'compengine']
549 549 if b'zlib.level' in opts:
550 550 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
551 551 if b'zstd.level' in opts:
552 552 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
553 553 if b'maxdeltachainspan' in opts:
554 554 self._maxdeltachainspan = opts[b'maxdeltachainspan']
555 555 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
556 556 mmapindexthreshold = opts[b'mmapindexthreshold']
557 557 self.hassidedata = bool(opts.get(b'side-data', False))
558 558 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
559 559 withsparseread = bool(opts.get(b'with-sparse-read', False))
560 560 # sparse-revlog forces sparse-read
561 561 self._withsparseread = self._sparserevlog or withsparseread
562 562 if b'sparse-read-density-threshold' in opts:
563 563 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
564 564 if b'sparse-read-min-gap-size' in opts:
565 565 self._srmingapsize = opts[b'sparse-read-min-gap-size']
566 566 if opts.get(b'enableellipsis'):
567 567 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
568 568
569 569 # revlog v0 doesn't have flag processors
570 570 for flag, processor in pycompat.iteritems(
571 571 opts.get(b'flagprocessors', {})
572 572 ):
573 573 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
574 574
575 575 if self._chunkcachesize <= 0:
576 576 raise error.RevlogError(
577 577 _(b'revlog chunk cache size %r is not greater than 0')
578 578 % self._chunkcachesize
579 579 )
580 580 elif self._chunkcachesize & (self._chunkcachesize - 1):
581 581 raise error.RevlogError(
582 582 _(b'revlog chunk cache size %r is not a power of 2')
583 583 % self._chunkcachesize
584 584 )
585 585
586 586 indexdata = b''
587 587 self._initempty = True
588 588 try:
589 589 with self._indexfp() as f:
590 590 if (
591 591 mmapindexthreshold is not None
592 592 and self.opener.fstat(f).st_size >= mmapindexthreshold
593 593 ):
594 594 # TODO: should .close() to release resources without
595 595 # relying on Python GC
596 596 indexdata = util.buffer(util.mmapread(f))
597 597 else:
598 598 indexdata = f.read()
599 599 if len(indexdata) > 0:
600 600 versionflags = versionformat_unpack(indexdata[:4])[0]
601 601 self._initempty = False
602 602 else:
603 603 versionflags = newversionflags
604 604 except IOError as inst:
605 605 if inst.errno != errno.ENOENT:
606 606 raise
607 607
608 608 versionflags = newversionflags
609 609
610 610 self.version = versionflags
611 611
612 612 flags = versionflags & ~0xFFFF
613 613 fmt = versionflags & 0xFFFF
614 614
615 615 if fmt == REVLOGV0:
616 616 if flags:
617 617 raise error.RevlogError(
618 618 _(b'unknown flags (%#04x) in version %d revlog %s')
619 619 % (flags >> 16, fmt, self.indexfile)
620 620 )
621 621
622 622 self._inline = False
623 623 self._generaldelta = False
624 624
625 625 elif fmt == REVLOGV1:
626 626 if flags & ~REVLOGV1_FLAGS:
627 627 raise error.RevlogError(
628 628 _(b'unknown flags (%#04x) in version %d revlog %s')
629 629 % (flags >> 16, fmt, self.indexfile)
630 630 )
631 631
632 632 self._inline = versionflags & FLAG_INLINE_DATA
633 633 self._generaldelta = versionflags & FLAG_GENERALDELTA
634 634
635 635 elif fmt == REVLOGV2:
636 636 if flags & ~REVLOGV2_FLAGS:
637 637 raise error.RevlogError(
638 638 _(b'unknown flags (%#04x) in version %d revlog %s')
639 639 % (flags >> 16, fmt, self.indexfile)
640 640 )
641 641
642 642 # There is a bug in the transaction handling when going from an
643 643 # inline revlog to a separate index and data file. Turn it off until
644 644 # it's fixed, since v2 revlogs sometimes get rewritten on exchange.
645 645 # See issue6485
646 646 self._inline = False
647 647 # generaldelta implied by version 2 revlogs.
648 648 self._generaldelta = True
649 649
650 650 else:
651 651 raise error.RevlogError(
652 652 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
653 653 )
654 654 # sparse-revlog can't be on without general-delta (issue6056)
655 655 if not self._generaldelta:
656 656 self._sparserevlog = False
657 657
658 658 self._storedeltachains = True
659 659
660 660 devel_nodemap = (
661 661 self.nodemap_file
662 662 and opts.get(b'devel-force-nodemap', False)
663 663 and NodemapRevlogIO is not None
664 664 )
665 665
666 666 use_rust_index = False
667 667 if rustrevlog is not None:
668 668 if self.nodemap_file is not None:
669 669 use_rust_index = True
670 670 else:
671 671 use_rust_index = self.opener.options.get(b'rust.index')
672 672
673 673 self._io = revlogio()
674 674 if self.version == REVLOGV0:
675 675 self._io = revlogoldio()
676 676 elif fmt == REVLOGV2:
677 677 self._io = revlogv2io()
678 678 elif devel_nodemap:
679 679 self._io = NodemapRevlogIO()
680 680 elif use_rust_index:
681 681 self._io = rustrevlogio()
682 682 try:
683 683 d = self._io.parseindex(indexdata, self._inline)
684 684 index, _chunkcache = d
685 685 use_nodemap = (
686 686 not self._inline
687 687 and self.nodemap_file is not None
688 688 and util.safehasattr(index, 'update_nodemap_data')
689 689 )
690 690 if use_nodemap:
691 691 nodemap_data = nodemaputil.persisted_data(self)
692 692 if nodemap_data is not None:
693 693 docket = nodemap_data[0]
694 694 if (
695 695 len(d[0]) > docket.tip_rev
696 696 and d[0][docket.tip_rev][7] == docket.tip_node
697 697 ):
698 698 # no changelog tampering
699 699 self._nodemap_docket = docket
700 700 index.update_nodemap_data(*nodemap_data)
701 701 except (ValueError, IndexError):
702 702 raise error.RevlogError(
703 703 _(b"index %s is corrupted") % self.indexfile
704 704 )
705 705 self.index, self._chunkcache = d
706 706 if not self._chunkcache:
707 707 self._chunkclear()
708 708 # revnum -> (chain-length, sum-delta-length)
709 709 self._chaininfocache = util.lrucachedict(500)
710 710 # revlog header -> revlog compressor
711 711 self._decompressors = {}
712 712
713 713 @util.propertycache
714 714 def _compressor(self):
715 715 engine = util.compengines[self._compengine]
716 716 return engine.revlogcompressor(self._compengineopts)
717 717
718 718 def _indexfp(self, mode=b'r'):
719 719 """file object for the revlog's index file"""
720 720 args = {'mode': mode}
721 721 if mode != b'r':
722 722 args['checkambig'] = self._checkambig
723 723 if mode == b'w':
724 724 args['atomictemp'] = True
725 725 return self.opener(self.indexfile, **args)
726 726
727 727 def _datafp(self, mode=b'r'):
728 728 """file object for the revlog's data file"""
729 729 return self.opener(self.datafile, mode=mode)
730 730
731 731 @contextlib.contextmanager
732 732 def _datareadfp(self, existingfp=None):
733 733 """file object suitable to read data"""
734 734 # Use explicit file handle, if given.
735 735 if existingfp is not None:
736 736 yield existingfp
737 737
738 738 # Use a file handle being actively used for writes, if available.
739 739 # There is some danger to doing this because reads will seek the
740 740 # file. However, _writeentry() performs a SEEK_END before all writes,
741 741 # so we should be safe.
742 742 elif self._writinghandles:
743 743 if self._inline:
744 744 yield self._writinghandles[0]
745 745 else:
746 746 yield self._writinghandles[1]
747 747
748 748 # Otherwise open a new file handle.
749 749 else:
750 750 if self._inline:
751 751 func = self._indexfp
752 752 else:
753 753 func = self._datafp
754 754 with func() as fp:
755 755 yield fp
756 756
757 757 def tiprev(self):
758 758 return len(self.index) - 1
759 759
760 760 def tip(self):
761 761 return self.node(self.tiprev())
762 762
763 763 def __contains__(self, rev):
764 764 return 0 <= rev < len(self)
765 765
766 766 def __len__(self):
767 767 return len(self.index)
768 768
769 769 def __iter__(self):
770 770 return iter(pycompat.xrange(len(self)))
771 771
772 772 def revs(self, start=0, stop=None):
773 773 """iterate over all rev in this revlog (from start to stop)"""
774 774 return storageutil.iterrevs(len(self), start=start, stop=stop)
775 775
776 776 @property
777 777 def nodemap(self):
778 778 msg = (
779 779 b"revlog.nodemap is deprecated, "
780 780 b"use revlog.index.[has_node|rev|get_rev]"
781 781 )
782 782 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
783 783 return self.index.nodemap
784 784
785 785 @property
786 786 def _nodecache(self):
787 787 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
788 788 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
789 789 return self.index.nodemap
790 790
791 791 def hasnode(self, node):
792 792 try:
793 793 self.rev(node)
794 794 return True
795 795 except KeyError:
796 796 return False
797 797
798 798 def candelta(self, baserev, rev):
799 799 """whether two revisions (baserev, rev) can be delta-ed or not"""
800 800 # Disable delta if either rev requires a content-changing flag
801 801 # processor (ex. LFS). This is because such flag processor can alter
802 802 # the rawtext content that the delta will be based on, and two clients
803 803 # could have a same revlog node with different flags (i.e. different
804 804 # rawtext contents) and the delta could be incompatible.
805 805 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
806 806 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
807 807 ):
808 808 return False
809 809 return True
810 810
811 811 def update_caches(self, transaction):
812 812 if self.nodemap_file is not None:
813 813 if transaction is None:
814 814 nodemaputil.update_persistent_nodemap(self)
815 815 else:
816 816 nodemaputil.setup_persistent_nodemap(transaction, self)
817 817
818 818 def clearcaches(self):
819 819 self._revisioncache = None
820 820 self._chainbasecache.clear()
821 821 self._chunkcache = (0, b'')
822 822 self._pcache = {}
823 823 self._nodemap_docket = None
824 824 self.index.clearcaches()
825 825 # The python code is responsible for validating the docket, so we
826 826 # end up having to refresh it here.
827 827 use_nodemap = (
828 828 not self._inline
829 829 and self.nodemap_file is not None
830 830 and util.safehasattr(self.index, 'update_nodemap_data')
831 831 )
832 832 if use_nodemap:
833 833 nodemap_data = nodemaputil.persisted_data(self)
834 834 if nodemap_data is not None:
835 835 self._nodemap_docket = nodemap_data[0]
836 836 self.index.update_nodemap_data(*nodemap_data)
837 837
838 838 def rev(self, node):
839 839 try:
840 840 return self.index.rev(node)
841 841 except TypeError:
842 842 raise
843 843 except error.RevlogError:
844 844 # parsers.c radix tree lookup failed
845 845 if node == wdirid or node in wdirfilenodeids:
846 846 raise error.WdirUnsupported
847 847 raise error.LookupError(node, self.indexfile, _(b'no node'))
848 848
849 849 # Accessors for index entries.
850 850
851 851 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
852 852 # are flags.
853 853 def start(self, rev):
854 854 return int(self.index[rev][0] >> 16)
855 855
856 856 def flags(self, rev):
857 857 return self.index[rev][0] & 0xFFFF
858 858
859 859 def length(self, rev):
860 860 return self.index[rev][1]
861 861
862 862 def sidedata_length(self, rev):
863 863 if self.version & 0xFFFF != REVLOGV2:
864 864 return 0
865 865 return self.index[rev][9]
866 866
867 867 def rawsize(self, rev):
868 868 """return the length of the uncompressed text for a given revision"""
869 869 l = self.index[rev][2]
870 870 if l >= 0:
871 871 return l
872 872
873 873 t = self.rawdata(rev)
874 874 return len(t)
875 875
876 876 def size(self, rev):
877 877 """length of non-raw text (processed by a "read" flag processor)"""
878 878 # fast path: if no "read" flag processor could change the content,
879 879 # size is rawsize. note: ELLIPSIS is known to not change the content.
880 880 flags = self.flags(rev)
881 881 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
882 882 return self.rawsize(rev)
883 883
884 884 return len(self.revision(rev, raw=False))
885 885
886 886 def chainbase(self, rev):
887 887 base = self._chainbasecache.get(rev)
888 888 if base is not None:
889 889 return base
890 890
891 891 index = self.index
892 892 iterrev = rev
893 893 base = index[iterrev][3]
894 894 while base != iterrev:
895 895 iterrev = base
896 896 base = index[iterrev][3]
897 897
898 898 self._chainbasecache[rev] = base
899 899 return base
900 900
901 901 def linkrev(self, rev):
902 902 return self.index[rev][4]
903 903
904 904 def parentrevs(self, rev):
905 905 try:
906 906 entry = self.index[rev]
907 907 except IndexError:
908 908 if rev == wdirrev:
909 909 raise error.WdirUnsupported
910 910 raise
911
912 return entry[5], entry[6]
911 if entry[5] == nullrev:
912 return entry[6], entry[5]
913 else:
914 return entry[5], entry[6]
913 915
914 916 # fast parentrevs(rev) where rev isn't filtered
915 917 _uncheckedparentrevs = parentrevs
916 918
917 919 def node(self, rev):
918 920 try:
919 921 return self.index[rev][7]
920 922 except IndexError:
921 923 if rev == wdirrev:
922 924 raise error.WdirUnsupported
923 925 raise
924 926
925 927 # Derived from index values.
926 928
927 929 def end(self, rev):
928 930 return self.start(rev) + self.length(rev)
929 931
930 932 def parents(self, node):
931 933 i = self.index
932 934 d = i[self.rev(node)]
933 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
935 # inline node() to avoid function call overhead
936 if d[5] == nullid:
937 return i[d[6]][7], i[d[5]][7]
938 else:
939 return i[d[5]][7], i[d[6]][7]
934 940
935 941 def chainlen(self, rev):
936 942 return self._chaininfo(rev)[0]
937 943
938 944 def _chaininfo(self, rev):
939 945 chaininfocache = self._chaininfocache
940 946 if rev in chaininfocache:
941 947 return chaininfocache[rev]
942 948 index = self.index
943 949 generaldelta = self._generaldelta
944 950 iterrev = rev
945 951 e = index[iterrev]
946 952 clen = 0
947 953 compresseddeltalen = 0
948 954 while iterrev != e[3]:
949 955 clen += 1
950 956 compresseddeltalen += e[1]
951 957 if generaldelta:
952 958 iterrev = e[3]
953 959 else:
954 960 iterrev -= 1
955 961 if iterrev in chaininfocache:
956 962 t = chaininfocache[iterrev]
957 963 clen += t[0]
958 964 compresseddeltalen += t[1]
959 965 break
960 966 e = index[iterrev]
961 967 else:
962 968 # Add text length of base since decompressing that also takes
963 969 # work. For cache hits the length is already included.
964 970 compresseddeltalen += e[1]
965 971 r = (clen, compresseddeltalen)
966 972 chaininfocache[rev] = r
967 973 return r
968 974
969 975 def _deltachain(self, rev, stoprev=None):
970 976 """Obtain the delta chain for a revision.
971 977
972 978 ``stoprev`` specifies a revision to stop at. If not specified, we
973 979 stop at the base of the chain.
974 980
975 981 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
976 982 revs in ascending order and ``stopped`` is a bool indicating whether
977 983 ``stoprev`` was hit.
978 984 """
979 985 # Try C implementation.
980 986 try:
981 987 return self.index.deltachain(rev, stoprev, self._generaldelta)
982 988 except AttributeError:
983 989 pass
984 990
985 991 chain = []
986 992
987 993 # Alias to prevent attribute lookup in tight loop.
988 994 index = self.index
989 995 generaldelta = self._generaldelta
990 996
991 997 iterrev = rev
992 998 e = index[iterrev]
993 999 while iterrev != e[3] and iterrev != stoprev:
994 1000 chain.append(iterrev)
995 1001 if generaldelta:
996 1002 iterrev = e[3]
997 1003 else:
998 1004 iterrev -= 1
999 1005 e = index[iterrev]
1000 1006
1001 1007 if iterrev == stoprev:
1002 1008 stopped = True
1003 1009 else:
1004 1010 chain.append(iterrev)
1005 1011 stopped = False
1006 1012
1007 1013 chain.reverse()
1008 1014 return chain, stopped
1009 1015
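On a toy delta-base table, the pure-python walk above behaves like this (bases is a stand-in for index entry field 3 under generaldelta; rev 0 is stored full, so its base is itself):

    bases = {0: 0, 1: 0, 2: 1}
    chain, iterrev = [], 2
    while iterrev != bases[iterrev]:
        chain.append(iterrev)
        iterrev = bases[iterrev]
    chain.append(iterrev)   # no stoprev hit, so include the base
    chain.reverse()
    assert chain == [0, 1, 2]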
1010 1016 def ancestors(self, revs, stoprev=0, inclusive=False):
1011 1017 """Generate the ancestors of 'revs' in reverse revision order.
1012 1018 Does not generate revs lower than stoprev.
1013 1019
1014 1020 See the documentation for ancestor.lazyancestors for more details."""
1015 1021
1016 1022 # first, make sure start revisions aren't filtered
1017 1023 revs = list(revs)
1018 1024 checkrev = self.node
1019 1025 for r in revs:
1020 1026 checkrev(r)
1021 1027 # and we're sure ancestors aren't filtered as well
1022 1028
1023 1029 if rustancestor is not None:
1024 1030 lazyancestors = rustancestor.LazyAncestors
1025 1031 arg = self.index
1026 1032 else:
1027 1033 lazyancestors = ancestor.lazyancestors
1028 1034 arg = self._uncheckedparentrevs
1029 1035 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1030 1036
1031 1037 def descendants(self, revs):
1032 1038 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1033 1039
1034 1040 def findcommonmissing(self, common=None, heads=None):
1035 1041 """Return a tuple of the ancestors of common and the ancestors of heads
1036 1042 that are not ancestors of common. In revset terminology, we return the
1037 1043 tuple:
1038 1044
1039 1045 ::common, (::heads) - (::common)
1040 1046
1041 1047 The list is sorted by revision number, meaning it is
1042 1048 topologically sorted.
1043 1049
1044 1050 'heads' and 'common' are both lists of node IDs. If heads is
1045 1051 not supplied, uses all of the revlog's heads. If common is not
1046 1052 supplied, uses nullid."""
1047 1053 if common is None:
1048 1054 common = [nullid]
1049 1055 if heads is None:
1050 1056 heads = self.heads()
1051 1057
1052 1058 common = [self.rev(n) for n in common]
1053 1059 heads = [self.rev(n) for n in heads]
1054 1060
1055 1061 # we want the ancestors, but inclusive
1056 1062 class lazyset(object):
1057 1063 def __init__(self, lazyvalues):
1058 1064 self.addedvalues = set()
1059 1065 self.lazyvalues = lazyvalues
1060 1066
1061 1067 def __contains__(self, value):
1062 1068 return value in self.addedvalues or value in self.lazyvalues
1063 1069
1064 1070 def __iter__(self):
1065 1071 added = self.addedvalues
1066 1072 for r in added:
1067 1073 yield r
1068 1074 for r in self.lazyvalues:
1069 1075 if not r in added:
1070 1076 yield r
1071 1077
1072 1078 def add(self, value):
1073 1079 self.addedvalues.add(value)
1074 1080
1075 1081 def update(self, values):
1076 1082 self.addedvalues.update(values)
1077 1083
1078 1084 has = lazyset(self.ancestors(common))
1079 1085 has.add(nullrev)
1080 1086 has.update(common)
1081 1087
1082 1088 # take all ancestors from heads that aren't in has
1083 1089 missing = set()
1084 1090 visit = collections.deque(r for r in heads if r not in has)
1085 1091 while visit:
1086 1092 r = visit.popleft()
1087 1093 if r in missing:
1088 1094 continue
1089 1095 else:
1090 1096 missing.add(r)
1091 1097 for p in self.parentrevs(r):
1092 1098 if p not in has:
1093 1099 visit.append(p)
1094 1100 missing = list(missing)
1095 1101 missing.sort()
1096 1102 return has, [self.node(miss) for miss in missing]
1097 1103
1098 1104 def incrementalmissingrevs(self, common=None):
1099 1105 """Return an object that can be used to incrementally compute the
1100 1106 revision numbers of the ancestors of arbitrary sets that are not
1101 1107 ancestors of common. This is an ancestor.incrementalmissingancestors
1102 1108 object.
1103 1109
1104 1110 'common' is a list of revision numbers. If common is not supplied, uses
1105 1111 nullrev.
1106 1112 """
1107 1113 if common is None:
1108 1114 common = [nullrev]
1109 1115
1110 1116 if rustancestor is not None:
1111 1117 return rustancestor.MissingAncestors(self.index, common)
1112 1118 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1113 1119
1114 1120 def findmissingrevs(self, common=None, heads=None):
1115 1121 """Return the revision numbers of the ancestors of heads that
1116 1122 are not ancestors of common.
1117 1123
1118 1124 More specifically, return a list of revision numbers corresponding to
1119 1125 nodes N such that every N satisfies the following constraints:
1120 1126
1121 1127 1. N is an ancestor of some node in 'heads'
1122 1128 2. N is not an ancestor of any node in 'common'
1123 1129
1124 1130 The list is sorted by revision number, meaning it is
1125 1131 topologically sorted.
1126 1132
1127 1133 'heads' and 'common' are both lists of revision numbers. If heads is
1128 1134 not supplied, uses all of the revlog's heads. If common is not
1129 1135 supplied, uses nullid."""
1130 1136 if common is None:
1131 1137 common = [nullrev]
1132 1138 if heads is None:
1133 1139 heads = self.headrevs()
1134 1140
1135 1141 inc = self.incrementalmissingrevs(common=common)
1136 1142 return inc.missingancestors(heads)
1137 1143
1138 1144 def findmissing(self, common=None, heads=None):
1139 1145 """Return the ancestors of heads that are not ancestors of common.
1140 1146
1141 1147 More specifically, return a list of nodes N such that every N
1142 1148 satisfies the following constraints:
1143 1149
1144 1150 1. N is an ancestor of some node in 'heads'
1145 1151 2. N is not an ancestor of any node in 'common'
1146 1152
1147 1153 The list is sorted by revision number, meaning it is
1148 1154 topologically sorted.
1149 1155
1150 1156 'heads' and 'common' are both lists of node IDs. If heads is
1151 1157 not supplied, uses all of the revlog's heads. If common is not
1152 1158 supplied, uses nullid."""
1153 1159 if common is None:
1154 1160 common = [nullid]
1155 1161 if heads is None:
1156 1162 heads = self.heads()
1157 1163
1158 1164 common = [self.rev(n) for n in common]
1159 1165 heads = [self.rev(n) for n in heads]
1160 1166
1161 1167 inc = self.incrementalmissingrevs(common=common)
1162 1168 return [self.node(r) for r in inc.missingancestors(heads)]
1163 1169
1164 1170 def nodesbetween(self, roots=None, heads=None):
1165 1171 """Return a topological path from 'roots' to 'heads'.
1166 1172
1167 1173 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1168 1174 topologically sorted list of all nodes N that satisfy both of
1169 1175 these constraints:
1170 1176
1171 1177 1. N is a descendant of some node in 'roots'
1172 1178 2. N is an ancestor of some node in 'heads'
1173 1179
1174 1180 Every node is considered to be both a descendant and an ancestor
1175 1181 of itself, so every reachable node in 'roots' and 'heads' will be
1176 1182 included in 'nodes'.
1177 1183
1178 1184 'outroots' is the list of reachable nodes in 'roots', i.e., the
1179 1185 subset of 'roots' that is returned in 'nodes'. Likewise,
1180 1186 'outheads' is the subset of 'heads' that is also in 'nodes'.
1181 1187
1182 1188 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1183 1189 unspecified, uses nullid as the only root. If 'heads' is
1184 1190 unspecified, uses list of all of the revlog's heads."""
1185 1191 nonodes = ([], [], [])
1186 1192 if roots is not None:
1187 1193 roots = list(roots)
1188 1194 if not roots:
1189 1195 return nonodes
1190 1196 lowestrev = min([self.rev(n) for n in roots])
1191 1197 else:
1192 1198 roots = [nullid] # Everybody's a descendant of nullid
1193 1199 lowestrev = nullrev
1194 1200 if (lowestrev == nullrev) and (heads is None):
1195 1201 # We want _all_ the nodes!
1196 1202 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1197 1203 if heads is None:
1198 1204 # All nodes are ancestors, so the latest ancestor is the last
1199 1205 # node.
1200 1206 highestrev = len(self) - 1
1201 1207 # Set ancestors to None to signal that every node is an ancestor.
1202 1208 ancestors = None
1203 1209 # Set heads to an empty dictionary for later discovery of heads
1204 1210 heads = {}
1205 1211 else:
1206 1212 heads = list(heads)
1207 1213 if not heads:
1208 1214 return nonodes
1209 1215 ancestors = set()
1210 1216 # Turn heads into a dictionary so we can remove 'fake' heads.
1211 1217 # Also, later we will be using it to filter out the heads we can't
1212 1218 # find from roots.
1213 1219 heads = dict.fromkeys(heads, False)
1214 1220 # Start at the top and keep marking parents until we're done.
1215 1221 nodestotag = set(heads)
1216 1222 # Remember where the top was so we can use it as a limit later.
1217 1223 highestrev = max([self.rev(n) for n in nodestotag])
1218 1224 while nodestotag:
1219 1225 # grab a node to tag
1220 1226 n = nodestotag.pop()
1221 1227 # Never tag nullid
1222 1228 if n == nullid:
1223 1229 continue
1224 1230 # A node's revision number represents its place in a
1225 1231 # topologically sorted list of nodes.
1226 1232 r = self.rev(n)
1227 1233 if r >= lowestrev:
1228 1234 if n not in ancestors:
1229 1235 # If we are possibly a descendant of one of the roots
1230 1236 # and we haven't already been marked as an ancestor
1231 1237 ancestors.add(n) # Mark as ancestor
1232 1238 # Add non-nullid parents to list of nodes to tag.
1233 1239 nodestotag.update(
1234 1240 [p for p in self.parents(n) if p != nullid]
1235 1241 )
1236 1242 elif n in heads: # We've seen it before, is it a fake head?
1237 1243 # So it is, real heads should not be the ancestors of
1238 1244 # any other heads.
1239 1245 heads.pop(n)
1240 1246 if not ancestors:
1241 1247 return nonodes
1242 1248 # Now that we have our set of ancestors, we want to remove any
1243 1249 # roots that are not ancestors.
1244 1250
1245 1251 # If one of the roots was nullid, everything is included anyway.
1246 1252 if lowestrev > nullrev:
1247 1253 # But, since we weren't, let's recompute the lowest rev to not
1248 1254 # include roots that aren't ancestors.
1249 1255
1250 1256 # Filter out roots that aren't ancestors of heads
1251 1257 roots = [root for root in roots if root in ancestors]
1252 1258 # Recompute the lowest revision
1253 1259 if roots:
1254 1260 lowestrev = min([self.rev(root) for root in roots])
1255 1261 else:
1256 1262 # No more roots? Return empty list
1257 1263 return nonodes
1258 1264 else:
1259 1265 # We are descending from nullid, and don't need to care about
1260 1266 # any other roots.
1261 1267 lowestrev = nullrev
1262 1268 roots = [nullid]
1263 1269 # Transform our roots list into a set.
1264 1270 descendants = set(roots)
1265 1271 # Also, keep the original roots so we can filter out roots that aren't
1266 1272 # 'real' roots (i.e. are descended from other roots).
1267 1273 roots = descendants.copy()
1268 1274 # Our topologically sorted list of output nodes.
1269 1275 orderedout = []
1270 1276 # Don't start at nullid since we don't want nullid in our output list,
1271 1277 # and if nullid shows up in descendants, empty parents will look like
1272 1278 # they're descendants.
1273 1279 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1274 1280 n = self.node(r)
1275 1281 isdescendant = False
1276 1282 if lowestrev == nullrev: # Everybody is a descendant of nullid
1277 1283 isdescendant = True
1278 1284 elif n in descendants:
1279 1285 # n is already a descendant
1280 1286 isdescendant = True
1281 1287 # This check only needs to be done here because all the roots
1282 1288 # will start being marked as descendants before the loop.
1283 1289 if n in roots:
1284 1290 # If n was a root, check if it's a 'real' root.
1285 1291 p = tuple(self.parents(n))
1286 1292 # If any of its parents are descendants, it's not a root.
1287 1293 if (p[0] in descendants) or (p[1] in descendants):
1288 1294 roots.remove(n)
1289 1295 else:
1290 1296 p = tuple(self.parents(n))
1291 1297 # A node is a descendant if either of its parents are
1292 1298 # descendants. (We seeded the descendants set with the roots
1293 1299 # up there, remember?)
1294 1300 if (p[0] in descendants) or (p[1] in descendants):
1295 1301 descendants.add(n)
1296 1302 isdescendant = True
1297 1303 if isdescendant and ((ancestors is None) or (n in ancestors)):
1298 1304 # Only include nodes that are both descendants and ancestors.
1299 1305 orderedout.append(n)
1300 1306 if (ancestors is not None) and (n in heads):
1301 1307 # We're trying to figure out which heads are reachable
1302 1308 # from roots.
1303 1309 # Mark this head as having been reached
1304 1310 heads[n] = True
1305 1311 elif ancestors is None:
1306 1312 # Otherwise, we're trying to discover the heads.
1307 1313 # Assume this is a head because if it isn't, the next step
1308 1314 # will eventually remove it.
1309 1315 heads[n] = True
1310 1316 # But, obviously its parents aren't.
1311 1317 for p in self.parents(n):
1312 1318 heads.pop(p, None)
1313 1319 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1314 1320 roots = list(roots)
1315 1321 assert orderedout
1316 1322 assert roots
1317 1323 assert heads
1318 1324 return (orderedout, roots, heads)
1319 1325
1320 1326 def headrevs(self, revs=None):
1321 1327 if revs is None:
1322 1328 try:
1323 1329 return self.index.headrevs()
1324 1330 except AttributeError:
1325 1331 return self._headrevs()
1326 1332 if rustdagop is not None:
1327 1333 return rustdagop.headrevs(self.index, revs)
1328 1334 return dagop.headrevs(revs, self._uncheckedparentrevs)
1329 1335
1330 1336 def computephases(self, roots):
1331 1337 return self.index.computephasesmapsets(roots)
1332 1338
1333 1339 def _headrevs(self):
1334 1340 count = len(self)
1335 1341 if not count:
1336 1342 return [nullrev]
1337 1343 # we won't iterate over filtered revs, so nobody is a head at the start
1338 1344 ishead = [0] * (count + 1)
1339 1345 index = self.index
1340 1346 for r in self:
1341 1347 ishead[r] = 1 # I may be a head
1342 1348 e = index[r]
1343 1349 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1344 1350 return [r for r, val in enumerate(ishead) if val]
1345 1351
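The same computation on a toy parent table, as a standalone sketch (toy_parents maps rev -> (p1, p2); the extra list slot absorbs nullrev, exactly as above):

    toy_parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1)}
    ishead = [0] * (len(toy_parents) + 1)  # last slot absorbs nullrev (-1)
    for r in toy_parents:
        ishead[r] = 1                      # r may be a head
        p1, p2 = toy_parents[r]
        ishead[p1] = ishead[p2] = 0        # its parents are not
    assert [r for r, v in enumerate(ishead) if v] == [2]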
1346 1352 def heads(self, start=None, stop=None):
1347 1353 """return the list of all nodes that have no children
1348 1354
1349 1355 if start is specified, only heads that are descendants of
1350 1356 start will be returned
1351 1357 if stop is specified, it will consider all the revs from stop
1352 1358 as if they had no children
1353 1359 """
1354 1360 if start is None and stop is None:
1355 1361 if not len(self):
1356 1362 return [nullid]
1357 1363 return [self.node(r) for r in self.headrevs()]
1358 1364
1359 1365 if start is None:
1360 1366 start = nullrev
1361 1367 else:
1362 1368 start = self.rev(start)
1363 1369
1364 1370 stoprevs = {self.rev(n) for n in stop or []}
1365 1371
1366 1372 revs = dagop.headrevssubset(
1367 1373 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1368 1374 )
1369 1375
1370 1376 return [self.node(rev) for rev in revs]
1371 1377
1372 1378 def children(self, node):
1373 1379 """find the children of a given node"""
1374 1380 c = []
1375 1381 p = self.rev(node)
1376 1382 for r in self.revs(start=p + 1):
1377 1383 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1378 1384 if prevs:
1379 1385 for pr in prevs:
1380 1386 if pr == p:
1381 1387 c.append(self.node(r))
1382 1388 elif p == nullrev:
1383 1389 c.append(self.node(r))
1384 1390 return c
1385 1391
1386 1392 def commonancestorsheads(self, a, b):
1387 1393 """calculate all the heads of the common ancestors of nodes a and b"""
1388 1394 a, b = self.rev(a), self.rev(b)
1389 1395 ancs = self._commonancestorsheads(a, b)
1390 1396 return pycompat.maplist(self.node, ancs)
1391 1397
1392 1398 def _commonancestorsheads(self, *revs):
1393 1399 """calculate all the heads of the common ancestors of revs"""
1394 1400 try:
1395 1401 ancs = self.index.commonancestorsheads(*revs)
1396 1402 except (AttributeError, OverflowError): # C implementation failed
1397 1403 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1398 1404 return ancs
1399 1405
1400 1406 def isancestor(self, a, b):
1401 1407 """return True if node a is an ancestor of node b
1402 1408
1403 1409 A revision is considered an ancestor of itself."""
1404 1410 a, b = self.rev(a), self.rev(b)
1405 1411 return self.isancestorrev(a, b)
1406 1412
1407 1413 def isancestorrev(self, a, b):
1408 1414 """return True if revision a is an ancestor of revision b
1409 1415
1410 1416 A revision is considered an ancestor of itself.
1411 1417
1412 1418 The implementation of this is trivial but the use of
1413 1419 reachableroots is not."""
1414 1420 if a == nullrev:
1415 1421 return True
1416 1422 elif a == b:
1417 1423 return True
1418 1424 elif a > b:
1419 1425 return False
1420 1426 return bool(self.reachableroots(a, [b], [a], includepath=False))
1421 1427
1422 1428 def reachableroots(self, minroot, heads, roots, includepath=False):
1423 1429 """return (heads(::(<roots> and <roots>::<heads>)))
1424 1430
1425 1431 If includepath is True, return (<roots>::<heads>)."""
1426 1432 try:
1427 1433 return self.index.reachableroots2(
1428 1434 minroot, heads, roots, includepath
1429 1435 )
1430 1436 except AttributeError:
1431 1437 return dagop._reachablerootspure(
1432 1438 self.parentrevs, minroot, roots, heads, includepath
1433 1439 )
1434 1440
1435 1441 def ancestor(self, a, b):
1436 1442 """calculate the "best" common ancestor of nodes a and b"""
1437 1443
1438 1444 a, b = self.rev(a), self.rev(b)
1439 1445 try:
1440 1446 ancs = self.index.ancestors(a, b)
1441 1447 except (AttributeError, OverflowError):
1442 1448 ancs = ancestor.ancestors(self.parentrevs, a, b)
1443 1449 if ancs:
1444 1450 # choose a consistent winner when there's a tie
1445 1451 return min(map(self.node, ancs))
1446 1452 return nullid
1447 1453
1448 1454 def _match(self, id):
1449 1455 if isinstance(id, int):
1450 1456 # rev
1451 1457 return self.node(id)
1452 1458 if len(id) == 20:
1453 1459 # possibly a binary node
1454 1460 # odds of a binary node being all hex in ASCII are 1 in 10**25
1455 1461 try:
1456 1462 node = id
1457 1463 self.rev(node) # quick search the index
1458 1464 return node
1459 1465 except error.LookupError:
1460 1466 pass # may be partial hex id
1461 1467 try:
1462 1468 # str(rev)
1463 1469 rev = int(id)
1464 1470 if b"%d" % rev != id:
1465 1471 raise ValueError
1466 1472 if rev < 0:
1467 1473 rev = len(self) + rev
1468 1474 if rev < 0 or rev >= len(self):
1469 1475 raise ValueError
1470 1476 return self.node(rev)
1471 1477 except (ValueError, OverflowError):
1472 1478 pass
1473 1479 if len(id) == 40:
1474 1480 try:
1475 1481 # a full hex nodeid?
1476 1482 node = bin(id)
1477 1483 self.rev(node)
1478 1484 return node
1479 1485 except (TypeError, error.LookupError):
1480 1486 pass
1481 1487
1482 1488 def _partialmatch(self, id):
1483 1489 # we don't care about wdirfilenodeids as they should always be full hashes
1484 1490 maybewdir = wdirhex.startswith(id)
1485 1491 try:
1486 1492 partial = self.index.partialmatch(id)
1487 1493 if partial and self.hasnode(partial):
1488 1494 if maybewdir:
1489 1495 # single 'ff...' match in radix tree, ambiguous with wdir
1490 1496 raise error.RevlogError
1491 1497 return partial
1492 1498 if maybewdir:
1493 1499 # no 'ff...' match in radix tree, wdir identified
1494 1500 raise error.WdirUnsupported
1495 1501 return None
1496 1502 except error.RevlogError:
1497 1503 # parsers.c radix tree lookup gave multiple matches
1498 1504 # fast path: for unfiltered changelog, radix tree is accurate
1499 1505 if not getattr(self, 'filteredrevs', None):
1500 1506 raise error.AmbiguousPrefixLookupError(
1501 1507 id, self.indexfile, _(b'ambiguous identifier')
1502 1508 )
1503 1509 # fall through to slow path that filters hidden revisions
1504 1510 except (AttributeError, ValueError):
1505 1511 # we are pure python, or key was too short to search radix tree
1506 1512 pass
1507 1513
1508 1514 if id in self._pcache:
1509 1515 return self._pcache[id]
1510 1516
1511 1517 if len(id) <= 40:
1512 1518 try:
1513 1519 # hex(node)[:...]
1514 1520 l = len(id) // 2 # grab an even number of digits
1515 1521 prefix = bin(id[: l * 2])
1516 1522 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1517 1523 nl = [
1518 1524 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1519 1525 ]
1520 1526 if nullhex.startswith(id):
1521 1527 nl.append(nullid)
1522 1528 if len(nl) > 0:
1523 1529 if len(nl) == 1 and not maybewdir:
1524 1530 self._pcache[id] = nl[0]
1525 1531 return nl[0]
1526 1532 raise error.AmbiguousPrefixLookupError(
1527 1533 id, self.indexfile, _(b'ambiguous identifier')
1528 1534 )
1529 1535 if maybewdir:
1530 1536 raise error.WdirUnsupported
1531 1537 return None
1532 1538 except TypeError:
1533 1539 pass
1534 1540
1535 1541 def lookup(self, id):
1536 1542 """locate a node based on:
1537 1543 - revision number or str(revision number)
1538 1544 - nodeid or subset of hex nodeid
1539 1545 """
1540 1546 n = self._match(id)
1541 1547 if n is not None:
1542 1548 return n
1543 1549 n = self._partialmatch(id)
1544 1550 if n:
1545 1551 return n
1546 1552
1547 1553 raise error.LookupError(id, self.indexfile, _(b'no match found'))
1548 1554
1549 1555 def shortest(self, node, minlength=1):
1550 1556 """Find the shortest unambiguous prefix that matches node."""
1551 1557
1552 1558 def isvalid(prefix):
1553 1559 try:
1554 1560 matchednode = self._partialmatch(prefix)
1555 1561 except error.AmbiguousPrefixLookupError:
1556 1562 return False
1557 1563 except error.WdirUnsupported:
1558 1564 # single 'ff...' match
1559 1565 return True
1560 1566 if matchednode is None:
1561 1567 raise error.LookupError(node, self.indexfile, _(b'no node'))
1562 1568 return True
1563 1569
1564 1570 def maybewdir(prefix):
1565 1571 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1566 1572
1567 1573 hexnode = hex(node)
1568 1574
1569 1575 def disambiguate(hexnode, minlength):
1570 1576 """Disambiguate against wdirid."""
1571 1577 for length in range(minlength, len(hexnode) + 1):
1572 1578 prefix = hexnode[:length]
1573 1579 if not maybewdir(prefix):
1574 1580 return prefix
1575 1581
1576 1582 if not getattr(self, 'filteredrevs', None):
1577 1583 try:
1578 1584 length = max(self.index.shortest(node), minlength)
1579 1585 return disambiguate(hexnode, length)
1580 1586 except error.RevlogError:
1581 1587 if node != wdirid:
1582 1588 raise error.LookupError(node, self.indexfile, _(b'no node'))
1583 1589 except AttributeError:
1584 1590 # Fall through to pure code
1585 1591 pass
1586 1592
1587 1593 if node == wdirid:
1588 1594 for length in range(minlength, len(hexnode) + 1):
1589 1595 prefix = hexnode[:length]
1590 1596 if isvalid(prefix):
1591 1597 return prefix
1592 1598
1593 1599 for length in range(minlength, len(hexnode) + 1):
1594 1600 prefix = hexnode[:length]
1595 1601 if isvalid(prefix):
1596 1602 return disambiguate(hexnode, length)
1597 1603
1598 1604 def cmp(self, node, text):
1599 1605 """compare text with a given file revision
1600 1606
1601 1607 returns True if text is different than what is stored.
1602 1608 """
1603 1609 p1, p2 = self.parents(node)
1604 1610 return storageutil.hashrevisionsha1(text, p1, p2) != node
1605 1611
1606 1612 def _cachesegment(self, offset, data):
1607 1613 """Add a segment to the revlog cache.
1608 1614
1609 1615 Accepts an absolute offset and the data that is at that location.
1610 1616 """
1611 1617 o, d = self._chunkcache
1612 1618 # try to add to existing cache
1613 1619 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1614 1620 self._chunkcache = o, d + data
1615 1621 else:
1616 1622 self._chunkcache = offset, data
1617 1623
1618 1624 def _readsegment(self, offset, length, df=None):
1619 1625 """Load a segment of raw data from the revlog.
1620 1626
1621 1627 Accepts an absolute offset, length to read, and an optional existing
1622 1628 file handle to read from.
1623 1629
1624 1630 If an existing file handle is passed, it will be seeked and the
1625 1631 original seek position will NOT be restored.
1626 1632
1627 1633 Returns a str or buffer of raw byte data.
1628 1634
1629 1635 Raises if the requested number of bytes could not be read.
1630 1636 """
1631 1637 # Cache data both forward and backward around the requested
1632 1638 # data, in a fixed size window. This helps speed up operations
1633 1639 # involving reading the revlog backwards.
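# For example, with a (hypothetical) chunk cache size of 65536, a
# 100-byte read at offset 70000 is widened to the aligned window
# [65536, 131072), i.e. realoffset = 65536 and reallength = 65536.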
1634 1640 cachesize = self._chunkcachesize
1635 1641 realoffset = offset & ~(cachesize - 1)
1636 1642 reallength = (
1637 1643 (offset + length + cachesize) & ~(cachesize - 1)
1638 1644 ) - realoffset
1639 1645 with self._datareadfp(df) as df:
1640 1646 df.seek(realoffset)
1641 1647 d = df.read(reallength)
1642 1648
1643 1649 self._cachesegment(realoffset, d)
1644 1650 if offset != realoffset or reallength != length:
1645 1651 startoffset = offset - realoffset
1646 1652 if len(d) - startoffset < length:
1647 1653 raise error.RevlogError(
1648 1654 _(
1649 1655 b'partial read of revlog %s; expected %d bytes from '
1650 1656 b'offset %d, got %d'
1651 1657 )
1652 1658 % (
1653 1659 self.indexfile if self._inline else self.datafile,
1654 1660 length,
1655 1661 realoffset,
1656 1662 len(d) - startoffset,
1657 1663 )
1658 1664 )
1659 1665
1660 1666 return util.buffer(d, startoffset, length)
1661 1667
1662 1668 if len(d) < length:
1663 1669 raise error.RevlogError(
1664 1670 _(
1665 1671 b'partial read of revlog %s; expected %d bytes from offset '
1666 1672 b'%d, got %d'
1667 1673 )
1668 1674 % (
1669 1675 self.indexfile if self._inline else self.datafile,
1670 1676 length,
1671 1677 offset,
1672 1678 len(d),
1673 1679 )
1674 1680 )
1675 1681
1676 1682 return d
1677 1683
1678 1684 def _getsegment(self, offset, length, df=None):
1679 1685 """Obtain a segment of raw data from the revlog.
1680 1686
1681 1687 Accepts an absolute offset, length of bytes to obtain, and an
1682 1688 optional file handle to the already-opened revlog. If the file
1683 1689 handle is used, its original seek position will not be preserved.
1684 1690
1685 1691 Requests for data may be returned from a cache.
1686 1692
1687 1693 Returns a str or a buffer instance of raw byte data.
1688 1694 """
1689 1695 o, d = self._chunkcache
1690 1696 l = len(d)
1691 1697
1692 1698 # is it in the cache?
1693 1699 cachestart = offset - o
1694 1700 cacheend = cachestart + length
1695 1701 if cachestart >= 0 and cacheend <= l:
1696 1702 if cachestart == 0 and cacheend == l:
1697 1703 return d # avoid a copy
1698 1704 return util.buffer(d, cachestart, cacheend - cachestart)
1699 1705
1700 1706 return self._readsegment(offset, length, df=df)
1701 1707
1702 1708 def _getsegmentforrevs(self, startrev, endrev, df=None):
1703 1709 """Obtain a segment of raw data corresponding to a range of revisions.
1704 1710
1705 1711 Accepts the start and end revisions and an optional already-open
1706 1712 file handle to be used for reading. If the file handle is read, its
1707 1713 seek position will not be preserved.
1708 1714
1709 1715 Requests for data may be satisfied by a cache.
1710 1716
1711 1717 Returns a 2-tuple of (offset, data) for the requested range of
1712 1718 revisions. Offset is the integer offset from the beginning of the
1713 1719 revlog and data is a str or buffer of the raw byte data.
1714 1720
1715 1721 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1716 1722 to determine where each revision's data begins and ends.
1717 1723 """
1718 1724 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1719 1725 # (functions are expensive).
1720 1726 index = self.index
1721 1727 istart = index[startrev]
1722 1728 start = int(istart[0] >> 16)
1723 1729 if startrev == endrev:
1724 1730 end = start + istart[1]
1725 1731 else:
1726 1732 iend = index[endrev]
1727 1733 end = int(iend[0] >> 16) + iend[1]
1728 1734
1729 1735 if self._inline:
1730 1736 start += (startrev + 1) * self._io.size
1731 1737 end += (endrev + 1) * self._io.size
1732 1738 length = end - start
1733 1739
1734 1740 return start, self._getsegment(start, length, df=df)
1735 1741
1736 1742 def _chunk(self, rev, df=None):
1737 1743 """Obtain a single decompressed chunk for a revision.
1738 1744
1739 1745 Accepts an integer revision and an optional already-open file handle
1740 1746 to be used for reading. If used, the seek position of the file will not
1741 1747 be preserved.
1742 1748
1743 1749 Returns a str holding uncompressed data for the requested revision.
1744 1750 """
1745 1751 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1746 1752
1747 1753 def _chunks(self, revs, df=None, targetsize=None):
1748 1754 """Obtain decompressed chunks for the specified revisions.
1749 1755
1750 1756 Accepts an iterable of numeric revisions that are assumed to be in
1751 1757 ascending order. Also accepts an optional already-open file handle
1752 1758 to be used for reading. If used, the seek position of the file will
1753 1759 not be preserved.
1754 1760
1755 1761 This function is similar to calling ``self._chunk()`` multiple times,
1756 1762 but is faster.
1757 1763
1758 1764 Returns a list with decompressed data for each requested revision.
1759 1765 """
1760 1766 if not revs:
1761 1767 return []
1762 1768 start = self.start
1763 1769 length = self.length
1764 1770 inline = self._inline
1765 1771 iosize = self._io.size
1766 1772 buffer = util.buffer
1767 1773
1768 1774 l = []
1769 1775 ladd = l.append
1770 1776
1771 1777 if not self._withsparseread:
1772 1778 slicedchunks = (revs,)
1773 1779 else:
1774 1780 slicedchunks = deltautil.slicechunk(
1775 1781 self, revs, targetsize=targetsize
1776 1782 )
1777 1783
1778 1784 for revschunk in slicedchunks:
1779 1785 firstrev = revschunk[0]
1780 1786 # Skip trailing revisions with empty diff
1781 1787 for lastrev in revschunk[::-1]:
1782 1788 if length(lastrev) != 0:
1783 1789 break
1784 1790
1785 1791 try:
1786 1792 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1787 1793 except OverflowError:
1788 1794 # issue4215 - we can't cache a run of chunks greater than
1789 1795 # 2G on Windows
1790 1796 return [self._chunk(rev, df=df) for rev in revschunk]
1791 1797
1792 1798 decomp = self.decompress
1793 1799 for rev in revschunk:
1794 1800 chunkstart = start(rev)
1795 1801 if inline:
1796 1802 chunkstart += (rev + 1) * iosize
1797 1803 chunklength = length(rev)
1798 1804 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1799 1805
1800 1806 return l
1801 1807
1802 1808 def _chunkclear(self):
1803 1809 """Clear the raw chunk cache."""
1804 1810 self._chunkcache = (0, b'')
1805 1811
1806 1812 def deltaparent(self, rev):
1807 1813 """return deltaparent of the given revision"""
1808 1814 base = self.index[rev][3]
1809 1815 if base == rev:
1810 1816 return nullrev
1811 1817 elif self._generaldelta:
1812 1818 return base
1813 1819 else:
1814 1820 return rev - 1
1815 1821
1816 1822 def issnapshot(self, rev):
1817 1823 """tells whether rev is a snapshot"""
1818 1824 if not self._sparserevlog:
1819 1825 return self.deltaparent(rev) == nullrev
1820 1826 elif util.safehasattr(self.index, b'issnapshot'):
1821 1827 # directly assign the method to cache the testing and access
1822 1828 self.issnapshot = self.index.issnapshot
1823 1829 return self.issnapshot(rev)
1824 1830 if rev == nullrev:
1825 1831 return True
1826 1832 entry = self.index[rev]
1827 1833 base = entry[3]
1828 1834 if base == rev:
1829 1835 return True
1830 1836 if base == nullrev:
1831 1837 return True
1832 1838 p1 = entry[5]
1833 1839 p2 = entry[6]
1834 1840 if base == p1 or base == p2:
1835 1841 return False
1836 1842 return self.issnapshot(base)
1837 1843
1838 1844 def snapshotdepth(self, rev):
1839 1845 """number of snapshot in the chain before this one"""
1840 1846 if not self.issnapshot(rev):
1841 1847 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1842 1848 return len(self._deltachain(rev)[0]) - 1
1843 1849
1844 1850 def revdiff(self, rev1, rev2):
1845 1851 """return or calculate a delta between two revisions
1846 1852
1847 1853 The delta calculated is in binary form and is intended to be written to
1848 1854 revlog data directly. So this function needs raw revision data.
1849 1855 """
1850 1856 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1851 1857 return bytes(self._chunk(rev2))
1852 1858
1853 1859 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1854 1860
1855 1861 def _processflags(self, text, flags, operation, raw=False):
1856 1862 """deprecated entry point to access flag processors"""
1857 1863 msg = b'_processflag(...) use the specialized variant'
1858 1864 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1859 1865 if raw:
1860 1866 return text, flagutil.processflagsraw(self, text, flags)
1861 1867 elif operation == b'read':
1862 1868 return flagutil.processflagsread(self, text, flags)
1863 1869 else: # write operation
1864 1870 return flagutil.processflagswrite(self, text, flags)
1865 1871
1866 1872 def revision(self, nodeorrev, _df=None, raw=False):
1867 1873 """return an uncompressed revision of a given node or revision
1868 1874 number.
1869 1875
1870 1876 _df - an existing file handle to read from. (internal-only)
1871 1877 raw - an optional argument specifying if the revision data is to be
1872 1878 treated as raw data when applying flag transforms. 'raw' should be set
1873 1879 to True when generating changegroups or in debug commands.
1874 1880 """
1875 1881 if raw:
1876 1882 msg = (
1877 1883 b'revlog.revision(..., raw=True) is deprecated, '
1878 1884 b'use revlog.rawdata(...)'
1879 1885 )
1880 1886 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1881 1887 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1882 1888
1883 1889 def sidedata(self, nodeorrev, _df=None):
1884 1890 """a map of extra data related to the changeset but not part of the hash
1885 1891
1886 1892 This function currently returns a dictionary. However, a more
1887 1893 advanced mapping object will likely be used in the future for
1888 1894 more efficient/lazy access.
1889 1895 """
1890 1896 return self._revisiondata(nodeorrev, _df)[1]
1891 1897
1892 1898 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1893 1899 # deal with <nodeorrev> argument type
1894 1900 if isinstance(nodeorrev, int):
1895 1901 rev = nodeorrev
1896 1902 node = self.node(rev)
1897 1903 else:
1898 1904 node = nodeorrev
1899 1905 rev = None
1900 1906
1901 1907 # fast path the special `nullid` rev
1902 1908 if node == nullid:
1903 1909 return b"", {}
1904 1910
1905 1911 # ``rawtext`` is the text as stored inside the revlog. Might be the
1906 1912 # revision or might need to be processed to retrieve the revision.
1907 1913 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1908 1914
1909 1915 if self.version & 0xFFFF == REVLOGV2:
1910 1916 if rev is None:
1911 1917 rev = self.rev(node)
1912 1918 sidedata = self._sidedata(rev)
1913 1919 else:
1914 1920 sidedata = {}
1915 1921
1916 1922 if raw and validated:
1917 1923 # if we don't want to process the raw text and that raw
1918 1924 # text is cached, we can exit early.
1919 1925 return rawtext, sidedata
1920 1926 if rev is None:
1921 1927 rev = self.rev(node)
1922 1928 # the revlog's flag for this revision
1923 1929 # (usually alter its state or content)
1924 1930 flags = self.flags(rev)
1925 1931
1926 1932 if validated and flags == REVIDX_DEFAULT_FLAGS:
1927 1933 # no extra flags set, no flag processor runs, text = rawtext
1928 1934 return rawtext, sidedata
1929 1935
1930 1936 if raw:
1931 1937 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1932 1938 text = rawtext
1933 1939 else:
1934 1940 r = flagutil.processflagsread(self, rawtext, flags)
1935 1941 text, validatehash = r
1936 1942 if validatehash:
1937 1943 self.checkhash(text, node, rev=rev)
1938 1944 if not validated:
1939 1945 self._revisioncache = (node, rev, rawtext)
1940 1946
1941 1947 return text, sidedata
1942 1948
1943 1949 def _rawtext(self, node, rev, _df=None):
1944 1950 """return the possibly unvalidated rawtext for a revision
1945 1951
1946 1952 returns (rev, rawtext, validated)
1947 1953 """
1948 1954
1949 1955 # revision in the cache (could be useful to apply delta)
1950 1956 cachedrev = None
1951 1957 # An intermediate text to apply deltas to
1952 1958 basetext = None
1953 1959
1954 1960 # Check if we have the entry in cache
1955 1961 # The cache entry looks like (node, rev, rawtext)
1956 1962 if self._revisioncache:
1957 1963 if self._revisioncache[0] == node:
1958 1964 return (rev, self._revisioncache[2], True)
1959 1965 cachedrev = self._revisioncache[1]
1960 1966
1961 1967 if rev is None:
1962 1968 rev = self.rev(node)
1963 1969
1964 1970 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1965 1971 if stopped:
1966 1972 basetext = self._revisioncache[2]
1967 1973
1968 1974 # drop cache to save memory, the caller is expected to
1969 1975 # update self._revisioncache after validating the text
1970 1976 self._revisioncache = None
1971 1977
1972 1978 targetsize = None
1973 1979 rawsize = self.index[rev][2]
1974 1980 if 0 <= rawsize:
1975 1981 targetsize = 4 * rawsize
1976 1982
1977 1983 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1978 1984 if basetext is None:
1979 1985 basetext = bytes(bins[0])
1980 1986 bins = bins[1:]
1981 1987
1982 1988 rawtext = mdiff.patches(basetext, bins)
1983 1989 del basetext # let us have a chance to free memory early
1984 1990 return (rev, rawtext, False)
1985 1991
1986 1992 def _sidedata(self, rev):
1987 1993 """Return the sidedata for a given revision number."""
1988 1994 index_entry = self.index[rev]
1989 1995 sidedata_offset = index_entry[8]
1990 1996 sidedata_size = index_entry[9]
1991 1997
1992 1998 if self._inline:
1993 1999 sidedata_offset += self._io.size * (1 + rev)
1994 2000 if sidedata_size == 0:
1995 2001 return {}
1996 2002
1997 2003 segment = self._getsegment(sidedata_offset, sidedata_size)
1998 2004 sidedata = sidedatautil.deserialize_sidedata(segment)
1999 2005 return sidedata
2000 2006
2001 2007 def rawdata(self, nodeorrev, _df=None):
2002 2008 """return an uncompressed raw data of a given node or revision number.
2003 2009
2004 2010 _df - an existing file handle to read from. (internal-only)
2005 2011 """
2006 2012 return self._revisiondata(nodeorrev, _df, raw=True)[0]
2007 2013
2008 2014 def hash(self, text, p1, p2):
2009 2015 """Compute a node hash.
2010 2016
2011 2017 Available as a function so that subclasses can replace the hash
2012 2018 as needed.
2013 2019 """
2014 2020 return storageutil.hashrevisionsha1(text, p1, p2)
2015 2021
2016 2022 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2017 2023 """Check node hash integrity.
2018 2024
2019 2025 Available as a function so that subclasses can extend hash mismatch
2020 2026 behaviors as needed.
2021 2027 """
2022 2028 try:
2023 2029 if p1 is None and p2 is None:
2024 2030 p1, p2 = self.parents(node)
2025 2031 if node != self.hash(text, p1, p2):
2026 2032 # Clear the revision cache on hash failure. The revision cache
2027 2033 # only stores the raw revision and clearing the cache does have
2028 2034 # the side-effect that we won't have a cache hit when the raw
2029 2035 # revision data is accessed. But this case should be rare and
2030 2036 # it is extra work to teach the cache about the hash
2031 2037 # verification state.
2032 2038 if self._revisioncache and self._revisioncache[0] == node:
2033 2039 self._revisioncache = None
2034 2040
2035 2041 revornode = rev
2036 2042 if revornode is None:
2037 2043 revornode = templatefilters.short(hex(node))
2038 2044 raise error.RevlogError(
2039 2045 _(b"integrity check failed on %s:%s")
2040 2046 % (self.indexfile, pycompat.bytestr(revornode))
2041 2047 )
2042 2048 except error.RevlogError:
2043 2049 if self._censorable and storageutil.iscensoredtext(text):
2044 2050 raise error.CensoredNodeError(self.indexfile, node, text)
2045 2051 raise
2046 2052
2047 2053 def _enforceinlinesize(self, tr, fp=None):
2048 2054 """Check if the revlog is too big for inline and convert if so.
2049 2055
2050 2056 This should be called after revisions are added to the revlog. If the
2051 2057 revlog has grown too large to be an inline revlog, it will convert it
2052 2058 to use multiple index and data files.
2053 2059 """
2054 2060 tiprev = len(self) - 1
2055 2061 if (
2056 2062 not self._inline
2057 2063 or (self.start(tiprev) + self.length(tiprev)) < _maxinline
2058 2064 ):
2059 2065 return
2060 2066
2061 2067 troffset = tr.findoffset(self.indexfile)
2062 2068 if troffset is None:
2063 2069 raise error.RevlogError(
2064 2070 _(b"%s not found in the transaction") % self.indexfile
2065 2071 )
2066 2072 trindex = 0
2067 2073 tr.add(self.datafile, 0)
2068 2074
2069 2075 if fp:
2070 2076 fp.flush()
2071 2077 fp.close()
2072 2078 # We can't use the cached file handle after close(). So prevent
2073 2079 # its usage.
2074 2080 self._writinghandles = None
2075 2081
2076 2082 with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
2077 2083 for r in self:
2078 2084 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
2079 2085 if troffset <= self.start(r):
2080 2086 trindex = r
2081 2087
2082 2088 with self._indexfp(b'w') as fp:
2083 2089 self.version &= ~FLAG_INLINE_DATA
2084 2090 self._inline = False
2085 2091 io = self._io
2086 2092 for i in self:
2087 2093 e = io.packentry(self.index[i], self.node, self.version, i)
2088 2094 fp.write(e)
2089 2095
2090 2096 # the temp file replaces the real index when we exit the context
2091 2097 # manager
2092 2098
2093 2099 tr.replace(self.indexfile, trindex * self._io.size)
2094 2100 nodemaputil.setup_persistent_nodemap(tr, self)
2095 2101 self._chunkclear()
2096 2102
2097 2103 def _nodeduplicatecallback(self, transaction, node):
2098 2104 """called when trying to add a node already stored."""
2099 2105
2100 2106 def addrevision(
2101 2107 self,
2102 2108 text,
2103 2109 transaction,
2104 2110 link,
2105 2111 p1,
2106 2112 p2,
2107 2113 cachedelta=None,
2108 2114 node=None,
2109 2115 flags=REVIDX_DEFAULT_FLAGS,
2110 2116 deltacomputer=None,
2111 2117 sidedata=None,
2112 2118 ):
2113 2119 """add a revision to the log
2114 2120
2115 2121 text - the revision data to add
2116 2122 transaction - the transaction object used for rollback
2117 2123 link - the linkrev data to add
2118 2124 p1, p2 - the parent nodeids of the revision
2119 2125 cachedelta - an optional precomputed delta
2120 2126 node - nodeid of revision; typically node is not specified, and it is
2121 2127 computed by default as hash(text, p1, p2), however subclasses might
2122 2128 use different hashing method (and override checkhash() in such case)
2123 2129 flags - the known flags to set on the revision
2124 2130 deltacomputer - an optional deltacomputer instance shared between
2125 2131 multiple calls
2126 2132 """
2127 2133 if link == nullrev:
2128 2134 raise error.RevlogError(
2129 2135 _(b"attempted to add linkrev -1 to %s") % self.indexfile
2130 2136 )
2131 2137
2132 2138 if sidedata is None:
2133 2139 sidedata = {}
2134 2140 elif not self.hassidedata:
2135 2141 raise error.ProgrammingError(
2136 2142 _(b"trying to add sidedata to a revlog who don't support them")
2137 2143 )
2138 2144
2139 2145 if flags:
2140 2146 node = node or self.hash(text, p1, p2)
2141 2147
2142 2148 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2143 2149
2144 2150 # If the flag processor modifies the revision data, ignore any provided
2145 2151 # cachedelta.
2146 2152 if rawtext != text:
2147 2153 cachedelta = None
2148 2154
2149 2155 if len(rawtext) > _maxentrysize:
2150 2156 raise error.RevlogError(
2151 2157 _(
2152 2158 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2153 2159 )
2154 2160 % (self.indexfile, len(rawtext))
2155 2161 )
2156 2162
2157 2163 node = node or self.hash(rawtext, p1, p2)
2158 2164 rev = self.index.get_rev(node)
2159 2165 if rev is not None:
2160 2166 return rev
2161 2167
2162 2168 if validatehash:
2163 2169 self.checkhash(rawtext, node, p1=p1, p2=p2)
2164 2170
2165 2171 return self.addrawrevision(
2166 2172 rawtext,
2167 2173 transaction,
2168 2174 link,
2169 2175 p1,
2170 2176 p2,
2171 2177 node,
2172 2178 flags,
2173 2179 cachedelta=cachedelta,
2174 2180 deltacomputer=deltacomputer,
2175 2181 sidedata=sidedata,
2176 2182 )
2177 2183
2178 2184 def addrawrevision(
2179 2185 self,
2180 2186 rawtext,
2181 2187 transaction,
2182 2188 link,
2183 2189 p1,
2184 2190 p2,
2185 2191 node,
2186 2192 flags,
2187 2193 cachedelta=None,
2188 2194 deltacomputer=None,
2189 2195 sidedata=None,
2190 2196 ):
2191 2197 """add a raw revision with known flags, node and parents
2192 2198 useful when reusing a revision not stored in this revlog (ex: received
2193 2199 over wire, or read from an external bundle).
2194 2200 """
2195 2201 dfh = None
2196 2202 if not self._inline:
2197 2203 dfh = self._datafp(b"a+")
2198 2204 ifh = self._indexfp(b"a+")
2199 2205 try:
2200 2206 return self._addrevision(
2201 2207 node,
2202 2208 rawtext,
2203 2209 transaction,
2204 2210 link,
2205 2211 p1,
2206 2212 p2,
2207 2213 flags,
2208 2214 cachedelta,
2209 2215 ifh,
2210 2216 dfh,
2211 2217 deltacomputer=deltacomputer,
2212 2218 sidedata=sidedata,
2213 2219 )
2214 2220 finally:
2215 2221 if dfh:
2216 2222 dfh.close()
2217 2223 ifh.close()
2218 2224
2219 2225 def compress(self, data):
2220 2226 """Generate a possibly-compressed representation of data."""
2221 2227 if not data:
2222 2228 return b'', data
2223 2229
2224 2230 compressed = self._compressor.compress(data)
2225 2231
2226 2232 if compressed:
2227 2233 # The revlog compressor added the header in the returned data.
2228 2234 return b'', compressed
2229 2235
2230 2236 if data[0:1] == b'\0':
2231 2237 return b'', data
2232 2238 return b'u', data
2233 2239
2234 2240 def decompress(self, data):
2235 2241 """Decompress a revlog chunk.
2236 2242
2237 2243 The chunk is expected to begin with a header identifying the
2238 2244 format type so it can be routed to an appropriate decompressor.
2239 2245 """
2240 2246 if not data:
2241 2247 return data
2242 2248
2243 2249 # Revlogs are read much more frequently than they are written and many
2244 2250 # chunks only take microseconds to decompress, so performance is
2245 2251 # important here.
2246 2252 #
2247 2253 # We can make a few assumptions about revlogs:
2248 2254 #
2249 2255 # 1) the majority of chunks will be compressed (as opposed to inline
2250 2256 # raw data).
2251 2257 # 2) decompressing *any* data will likely be at least 10x slower than
2252 2258 # returning raw inline data.
2253 2259 # 3) we want to prioritize common and officially supported compression
2254 2260 # engines
2255 2261 #
2256 2262 # It follows that we want to optimize for the "decompress compressed
2257 2263 # data encoded with common and officially supported compression
2258 2264 # engines" case over the "raw data" and "data encoded by less common
2259 2265 # or non-official compression engines" cases. That is why we have the
2260 2266 # inline lookup first, followed by the compengines lookup.
2261 2267 #
2262 2268 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2263 2269 # compressed chunks. And this matters for changelog and manifest reads.
2264 2270 t = data[0:1]
2265 2271
2266 2272 if t == b'x':
2267 2273 try:
2268 2274 return _zlibdecompress(data)
2269 2275 except zlib.error as e:
2270 2276 raise error.RevlogError(
2271 2277 _(b'revlog decompress error: %s')
2272 2278 % stringutil.forcebytestr(e)
2273 2279 )
2274 2280 # '\0' is more common than 'u' so it goes first.
2275 2281 elif t == b'\0':
2276 2282 return data
2277 2283 elif t == b'u':
2278 2284 return util.buffer(data, 1)
2279 2285
2280 2286 try:
2281 2287 compressor = self._decompressors[t]
2282 2288 except KeyError:
2283 2289 try:
2284 2290 engine = util.compengines.forrevlogheader(t)
2285 2291 compressor = engine.revlogcompressor(self._compengineopts)
2286 2292 self._decompressors[t] = compressor
2287 2293 except KeyError:
2288 2294 raise error.RevlogError(_(b'unknown compression type %r') % t)
2289 2295
2290 2296 return compressor.decompress(data)
2291 2297
2292 2298 def _addrevision(
2293 2299 self,
2294 2300 node,
2295 2301 rawtext,
2296 2302 transaction,
2297 2303 link,
2298 2304 p1,
2299 2305 p2,
2300 2306 flags,
2301 2307 cachedelta,
2302 2308 ifh,
2303 2309 dfh,
2304 2310 alwayscache=False,
2305 2311 deltacomputer=None,
2306 2312 sidedata=None,
2307 2313 ):
2308 2314 """internal function to add revisions to the log
2309 2315
2310 2316 see addrevision for argument descriptions.
2311 2317
2312 2318 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2313 2319
2314 2320 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2315 2321 be used.
2316 2322
2317 2323 invariants:
2318 2324 - rawtext is optional (can be None); if not set, cachedelta must be set.
2319 2325 if both are set, they must correspond to each other.
2320 2326 """
2321 2327 if node == nullid:
2322 2328 raise error.RevlogError(
2323 2329 _(b"%s: attempt to add null revision") % self.indexfile
2324 2330 )
2325 2331 if node == wdirid or node in wdirfilenodeids:
2326 2332 raise error.RevlogError(
2327 2333 _(b"%s: attempt to add wdir revision") % self.indexfile
2328 2334 )
2329 2335
2330 2336 if self._inline:
2331 2337 fh = ifh
2332 2338 else:
2333 2339 fh = dfh
2334 2340
2335 2341 btext = [rawtext]
2336 2342
2337 2343 curr = len(self)
2338 2344 prev = curr - 1
2339 2345
2340 2346 offset = self._get_data_offset(prev)
2341 2347
2342 2348 if self._concurrencychecker:
2343 2349 if self._inline:
2344 2350 # offset is "as if" it were in the .d file, so we need to add on
2345 2351 # the size of the entry metadata.
2346 2352 self._concurrencychecker(
2347 2353 ifh, self.indexfile, offset + curr * self._io.size
2348 2354 )
2349 2355 else:
2350 2356 # Entries in the .i are a consistent size.
2351 2357 self._concurrencychecker(
2352 2358 ifh, self.indexfile, curr * self._io.size
2353 2359 )
2354 2360 self._concurrencychecker(dfh, self.datafile, offset)
2355 2361
2356 2362 p1r, p2r = self.rev(p1), self.rev(p2)
2357 2363
2358 2364 # full versions are inserted when the needed deltas
2359 2365 # become comparable to the uncompressed text
2360 2366 if rawtext is None:
2361 2367 # need rawtext size, before changed by flag processors, which is
2362 2368 # the non-raw size. use revlog explicitly to avoid filelog's extra
2363 2369 # logic that might remove metadata size.
2364 2370 textlen = mdiff.patchedsize(
2365 2371 revlog.size(self, cachedelta[0]), cachedelta[1]
2366 2372 )
2367 2373 else:
2368 2374 textlen = len(rawtext)
2369 2375
2370 2376 if deltacomputer is None:
2371 2377 deltacomputer = deltautil.deltacomputer(self)
2372 2378
2373 2379 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2374 2380
2375 2381 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2376 2382
2377 2383 if sidedata:
2378 2384 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2379 2385 sidedata_offset = offset + deltainfo.deltalen
2380 2386 else:
2381 2387 serialized_sidedata = b""
2382 2388 # Don't store the offset if the sidedata is empty, that way
2383 2389 # we can easily detect empty sidedata and they will be no different
2384 2390 # from ones we manually add.
2385 2391 sidedata_offset = 0
2386 2392
2387 2393 e = (
2388 2394 offset_type(offset, flags),
2389 2395 deltainfo.deltalen,
2390 2396 textlen,
2391 2397 deltainfo.base,
2392 2398 link,
2393 2399 p1r,
2394 2400 p2r,
2395 2401 node,
2396 2402 sidedata_offset,
2397 2403 len(serialized_sidedata),
2398 2404 )
2399 2405
2400 2406 if self.version & 0xFFFF != REVLOGV2:
2401 2407 e = e[:8]
2402 2408
2403 2409 self.index.append(e)
2404 2410 entry = self._io.packentry(e, self.node, self.version, curr)
2405 2411 self._writeentry(
2406 2412 transaction,
2407 2413 ifh,
2408 2414 dfh,
2409 2415 entry,
2410 2416 deltainfo.data,
2411 2417 link,
2412 2418 offset,
2413 2419 serialized_sidedata,
2414 2420 )
2415 2421
2416 2422 rawtext = btext[0]
2417 2423
2418 2424 if alwayscache and rawtext is None:
2419 2425 rawtext = deltacomputer.buildtext(revinfo, fh)
2420 2426
2421 2427 if type(rawtext) == bytes: # only accept immutable objects
2422 2428 self._revisioncache = (node, curr, rawtext)
2423 2429 self._chainbasecache[curr] = deltainfo.chainbase
2424 2430 return curr
2425 2431
2426 2432 def _get_data_offset(self, prev):
2427 2433 """Returns the current offset in the (in-transaction) data file.
2428 2434 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2429 2435 file to store that information: since sidedata can be rewritten to the
2430 2436 end of the data file within a transaction, you can have cases where, for
2431 2437 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2432 2438 to `n - 1`'s sidedata being written after `n`'s data.
2433 2439
2434 2440 TODO cache this in a docket file before getting out of experimental."""
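# Note: as a consequence, the revlog v2 case below is O(len(self)),
# scanning every index entry, while the < v2 case is a single lookup.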
2435 2441 if self.version & 0xFFFF != REVLOGV2:
2436 2442 return self.end(prev)
2437 2443
2438 2444 offset = 0
2439 2445 for rev, entry in enumerate(self.index):
2440 2446 sidedata_end = entry[8] + entry[9]
2441 2447 # Sidedata for a previous rev has potentially been written after
2442 2448 # this rev's end, so take the max.
2443 2449 offset = max(self.end(rev), offset, sidedata_end)
2444 2450 return offset
2445 2451
2446 2452 def _writeentry(
2447 2453 self, transaction, ifh, dfh, entry, data, link, offset, sidedata
2448 2454 ):
2449 2455 # Files opened in a+ mode have inconsistent behavior on various
2450 2456 # platforms. Windows requires that a file positioning call be made
2451 2457 # when the file handle transitions between reads and writes. See
2452 2458 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2453 2459 # platforms, Python or the platform itself can be buggy. Some versions
2454 2460 # of Solaris have been observed to not append at the end of the file
2455 2461 # if the file was seeked to before the end. See issue4943 for more.
2456 2462 #
2457 2463 # We work around this issue by inserting a seek() before writing.
2458 2464 # Note: This is likely not necessary on Python 3. However, because
2459 2465 # the file handle is reused for reads and may be seeked there, we need
2460 2466 # to be careful before changing this.
2461 2467 ifh.seek(0, os.SEEK_END)
2462 2468 if dfh:
2463 2469 dfh.seek(0, os.SEEK_END)
2464 2470
2465 2471 curr = len(self) - 1
2466 2472 if not self._inline:
2467 2473 transaction.add(self.datafile, offset)
2468 2474 transaction.add(self.indexfile, curr * len(entry))
2469 2475 if data[0]:
2470 2476 dfh.write(data[0])
2471 2477 dfh.write(data[1])
2472 2478 if sidedata:
2473 2479 dfh.write(sidedata)
2474 2480 ifh.write(entry)
2475 2481 else:
2476 2482 offset += curr * self._io.size
2477 2483 transaction.add(self.indexfile, offset)
2478 2484 ifh.write(entry)
2479 2485 ifh.write(data[0])
2480 2486 ifh.write(data[1])
2481 2487 if sidedata:
2482 2488 ifh.write(sidedata)
2483 2489 self._enforceinlinesize(transaction, ifh)
2484 2490 nodemaputil.setup_persistent_nodemap(transaction, self)
2485 2491
2486 2492 def addgroup(
2487 2493 self,
2488 2494 deltas,
2489 2495 linkmapper,
2490 2496 transaction,
2491 2497 alwayscache=False,
2492 2498 addrevisioncb=None,
2493 2499 duplicaterevisioncb=None,
2494 2500 ):
2495 2501 """
2496 2502 add a delta group
2497 2503
2498 2504 given a set of deltas, add them to the revision log. the
2499 2505 first delta is against its parent, which should be in our
2500 2506 log, the rest are against the previous delta.
2501 2507
2502 2508 If ``addrevisioncb`` is defined, it will be called with arguments of
2503 2509 this revlog and the revision number that was added.
2504 2510 """
2505 2511
2506 2512 if self._writinghandles:
2507 2513 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2508 2514
2509 2515 r = len(self)
2510 2516 end = 0
2511 2517 if r:
2512 2518 end = self.end(r - 1)
2513 2519 ifh = self._indexfp(b"a+")
2514 2520 isize = r * self._io.size
2515 2521 if self._inline:
2516 2522 transaction.add(self.indexfile, end + isize)
2517 2523 dfh = None
2518 2524 else:
2519 2525 transaction.add(self.indexfile, isize)
2520 2526 transaction.add(self.datafile, end)
2521 2527 dfh = self._datafp(b"a+")
2522 2528
2523 2529 def flush():
2524 2530 if dfh:
2525 2531 dfh.flush()
2526 2532 ifh.flush()
2527 2533
2528 2534 self._writinghandles = (ifh, dfh)
2529 2535 empty = True
2530 2536
2531 2537 try:
2532 2538 deltacomputer = deltautil.deltacomputer(self)
2533 2539 # loop through our set of deltas
2534 2540 for data in deltas:
2535 2541 node, p1, p2, linknode, deltabase, delta, flags, sidedata = data
2536 2542 link = linkmapper(linknode)
2537 2543 flags = flags or REVIDX_DEFAULT_FLAGS
2538 2544
2539 2545 rev = self.index.get_rev(node)
2540 2546 if rev is not None:
2541 2547 # this can happen if two branches make the same change
2542 2548 self._nodeduplicatecallback(transaction, rev)
2543 2549 if duplicaterevisioncb:
2544 2550 duplicaterevisioncb(self, rev)
2545 2551 empty = False
2546 2552 continue
2547 2553
2548 2554 for p in (p1, p2):
2549 2555 if not self.index.has_node(p):
2550 2556 raise error.LookupError(
2551 2557 p, self.indexfile, _(b'unknown parent')
2552 2558 )
2553 2559
2554 2560 if not self.index.has_node(deltabase):
2555 2561 raise error.LookupError(
2556 2562 deltabase, self.indexfile, _(b'unknown delta base')
2557 2563 )
2558 2564
2559 2565 baserev = self.rev(deltabase)
2560 2566
2561 2567 if baserev != nullrev and self.iscensored(baserev):
2562 2568 # if base is censored, delta must be full replacement in a
2563 2569 # single patch operation
2564 2570 hlen = struct.calcsize(b">lll")
2565 2571 oldlen = self.rawsize(baserev)
2566 2572 newlen = len(delta) - hlen
2567 2573 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2568 2574 raise error.CensoredBaseError(
2569 2575 self.indexfile, self.node(baserev)
2570 2576 )
2571 2577
2572 2578 if not flags and self._peek_iscensored(baserev, delta, flush):
2573 2579 flags |= REVIDX_ISCENSORED
2574 2580
2575 2581 # We assume consumers of addrevisioncb will want to retrieve
2576 2582 # the added revision, which will require a call to
2577 2583 # revision(). revision() will fast path if there is a cache
2578 2584 # hit. So, we tell _addrevision() to always cache in this case.
2579 2585 # We're only using addgroup() in the context of changegroup
2580 2586 # generation so the revision data can always be handled as raw
2581 2587 # by the flagprocessor.
2582 2588 rev = self._addrevision(
2583 2589 node,
2584 2590 None,
2585 2591 transaction,
2586 2592 link,
2587 2593 p1,
2588 2594 p2,
2589 2595 flags,
2590 2596 (baserev, delta),
2591 2597 ifh,
2592 2598 dfh,
2593 2599 alwayscache=alwayscache,
2594 2600 deltacomputer=deltacomputer,
2595 2601 sidedata=sidedata,
2596 2602 )
2597 2603
2598 2604 if addrevisioncb:
2599 2605 addrevisioncb(self, rev)
2600 2606 empty = False
2601 2607
2602 2608 if not dfh and not self._inline:
2603 2609 # addrevision switched from inline to conventional
2604 2610 # reopen the index
2605 2611 ifh.close()
2606 2612 dfh = self._datafp(b"a+")
2607 2613 ifh = self._indexfp(b"a+")
2608 2614 self._writinghandles = (ifh, dfh)
2609 2615 finally:
2610 2616 self._writinghandles = None
2611 2617
2612 2618 if dfh:
2613 2619 dfh.close()
2614 2620 ifh.close()
2615 2621 return not empty
2616 2622
2617 2623 def iscensored(self, rev):
2618 2624 """Check if a file revision is censored."""
2619 2625 if not self._censorable:
2620 2626 return False
2621 2627
2622 2628 return self.flags(rev) & REVIDX_ISCENSORED
2623 2629
2624 2630 def _peek_iscensored(self, baserev, delta, flush):
2625 2631 """Quickly check if a delta produces a censored revision."""
2626 2632 if not self._censorable:
2627 2633 return False
2628 2634
2629 2635 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2630 2636
2631 2637 def getstrippoint(self, minlink):
2632 2638 """find the minimum rev that must be stripped to strip the linkrev
2633 2639
2634 2640 Returns a tuple containing the minimum rev and a set of all revs that
2635 2641 have linkrevs that will be broken by this strip.
2636 2642 """
2637 2643 return storageutil.resolvestripinfo(
2638 2644 minlink,
2639 2645 len(self) - 1,
2640 2646 self.headrevs(),
2641 2647 self.linkrev,
2642 2648 self.parentrevs,
2643 2649 )
2644 2650
2645 2651 def strip(self, minlink, transaction):
2646 2652 """truncate the revlog on the first revision with a linkrev >= minlink
2647 2653
2648 2654 This function is called when we're stripping revision minlink and
2649 2655 its descendants from the repository.
2650 2656
2651 2657 We have to remove all revisions with linkrev >= minlink, because
2652 2658 the equivalent changelog revisions will be renumbered after the
2653 2659 strip.
2654 2660
2655 2661 So we truncate the revlog on the first of these revisions, and
2656 2662 trust that the caller has saved the revisions that shouldn't be
2657 2663 removed and that it'll re-add them after this truncation.
2658 2664 """
2659 2665 if len(self) == 0:
2660 2666 return
2661 2667
2662 2668 rev, _ = self.getstrippoint(minlink)
2663 2669 if rev == len(self):
2664 2670 return
2665 2671
2666 2672 # first truncate the files on disk
2667 2673 end = self.start(rev)
2668 2674 if not self._inline:
2669 2675 transaction.add(self.datafile, end)
2670 2676 end = rev * self._io.size
2671 2677 else:
2672 2678 end += rev * self._io.size
2673 2679
2674 2680 transaction.add(self.indexfile, end)
2675 2681
2676 2682 # then reset internal state in memory to forget those revisions
2677 2683 self._revisioncache = None
2678 2684 self._chaininfocache = util.lrucachedict(500)
2679 2685 self._chunkclear()
2680 2686
2681 2687 del self.index[rev:-1]
2682 2688
2683 2689 def checksize(self):
2684 2690 """Check size of index and data files
2685 2691
2686 2692 return a (dd, di) tuple.
2687 2693 - dd: extra bytes for the "data" file
2688 2694 - di: extra bytes for the "index" file
2689 2695
2690 2696 A healthy revlog will return (0, 0).
2691 2697 """
2692 2698 expected = 0
2693 2699 if len(self):
2694 2700 expected = max(0, self.end(len(self) - 1))
2695 2701
2696 2702 try:
2697 2703 with self._datafp() as f:
2698 2704 f.seek(0, io.SEEK_END)
2699 2705 actual = f.tell()
2700 2706 dd = actual - expected
2701 2707 except IOError as inst:
2702 2708 if inst.errno != errno.ENOENT:
2703 2709 raise
2704 2710 dd = 0
2705 2711
2706 2712 try:
2707 2713 f = self.opener(self.indexfile)
2708 2714 f.seek(0, io.SEEK_END)
2709 2715 actual = f.tell()
2710 2716 f.close()
2711 2717 s = self._io.size
2712 2718 i = max(0, actual // s)
2713 2719 di = actual - (i * s)
2714 2720 if self._inline:
2715 2721 databytes = 0
2716 2722 for r in self:
2717 2723 databytes += max(0, self.length(r))
2718 2724 dd = 0
2719 2725 di = actual - len(self) * s - databytes
2720 2726 except IOError as inst:
2721 2727 if inst.errno != errno.ENOENT:
2722 2728 raise
2723 2729 di = 0
2724 2730
2725 2731 return (dd, di)
2726 2732
2727 2733 def files(self):
2728 2734 res = [self.indexfile]
2729 2735 if not self._inline:
2730 2736 res.append(self.datafile)
2731 2737 return res
2732 2738
2733 2739 def emitrevisions(
2734 2740 self,
2735 2741 nodes,
2736 2742 nodesorder=None,
2737 2743 revisiondata=False,
2738 2744 assumehaveparentrevisions=False,
2739 2745 deltamode=repository.CG_DELTAMODE_STD,
2740 2746 sidedata_helpers=None,
2741 2747 ):
2742 2748 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2743 2749 raise error.ProgrammingError(
2744 2750 b'unhandled value for nodesorder: %s' % nodesorder
2745 2751 )
2746 2752
2747 2753 if nodesorder is None and not self._generaldelta:
2748 2754 nodesorder = b'storage'
2749 2755
2750 2756 if (
2751 2757 not self._storedeltachains
2752 2758 and deltamode != repository.CG_DELTAMODE_PREV
2753 2759 ):
2754 2760 deltamode = repository.CG_DELTAMODE_FULL
2755 2761
2756 2762 return storageutil.emitrevisions(
2757 2763 self,
2758 2764 nodes,
2759 2765 nodesorder,
2760 2766 revlogrevisiondelta,
2761 2767 deltaparentfn=self.deltaparent,
2762 2768 candeltafn=self.candelta,
2763 2769 rawsizefn=self.rawsize,
2764 2770 revdifffn=self.revdiff,
2765 2771 flagsfn=self.flags,
2766 2772 deltamode=deltamode,
2767 2773 revisiondata=revisiondata,
2768 2774 assumehaveparentrevisions=assumehaveparentrevisions,
2769 2775 sidedata_helpers=sidedata_helpers,
2770 2776 )
2771 2777
2772 2778 DELTAREUSEALWAYS = b'always'
2773 2779 DELTAREUSESAMEREVS = b'samerevs'
2774 2780 DELTAREUSENEVER = b'never'
2775 2781
2776 2782 DELTAREUSEFULLADD = b'fulladd'
2777 2783
2778 2784 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2779 2785
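# Illustrative (hypothetical) use of the constants above, forcing all
# deltas to be recomputed in the destination:
#   srcrevlog.clone(tr, destrevlog, deltareuse=revlog.DELTAREUSENEVER)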
2780 2786 def clone(
2781 2787 self,
2782 2788 tr,
2783 2789 destrevlog,
2784 2790 addrevisioncb=None,
2785 2791 deltareuse=DELTAREUSESAMEREVS,
2786 2792 forcedeltabothparents=None,
2787 2793 sidedatacompanion=None,
2788 2794 ):
2789 2795 """Copy this revlog to another, possibly with format changes.
2790 2796
2791 2797 The destination revlog will contain the same revisions and nodes.
2792 2798 However, it may not be bit-for-bit identical due to e.g. delta encoding
2793 2799 differences.
2794 2800
2795 2801 The ``deltareuse`` argument controls how deltas from the existing revlog
2796 2802 are preserved in the destination revlog. The argument can have the
2797 2803 following values:
2798 2804
2799 2805 DELTAREUSEALWAYS
2800 2806 Deltas will always be reused (if possible), even if the destination
2801 2807 revlog would not select the same revisions for the delta. This is the
2802 2808 fastest mode of operation.
2803 2809 DELTAREUSESAMEREVS
2804 2810 Deltas will be reused if the destination revlog would pick the same
2805 2811 revisions for the delta. This mode strikes a balance between speed
2806 2812 and optimization.
2807 2813 DELTAREUSENEVER
2808 2814 Deltas will never be reused. This is the slowest mode of execution.
2809 2815 This mode can be used to recompute deltas (e.g. if the diff/delta
2810 2816 algorithm changes).
2811 2817 DELTAREUSEFULLADD
2812 2818 Revisions will be re-added as if they were new content. This is
2813 2819 slower than DELTAREUSEALWAYS but allows more mechanisms to kick
2814 2820 in, e.g. large file detection and handling.
2815 2821
2816 2822 Delta computation can be slow, so the choice of delta reuse policy can
2817 2823 significantly affect run time.
2818 2824
2819 2825 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2820 2826 two extremes. Deltas will be reused if they are appropriate. But if the
2821 2827 delta could choose a better revision, it will do so. This means if you
2822 2828 are converting a non-generaldelta revlog to a generaldelta revlog,
2823 2829 deltas will be recomputed if the delta's parent isn't a parent of the
2824 2830 revision.
2825 2831
2826 2832 In addition to the delta policy, the ``forcedeltabothparents``
2827 2833 argument controls whether to force compute deltas against both parents
2828 2834 for merges. By default, the current default is used.
2829 2835
2830 2836 If not None, `sidedatacompanion` is a callable that accepts two
2831 2837 arguments:
2832 2838
2833 2839 (srcrevlog, rev)
2834 2840
2835 2841 and returns a quintet that controls changes to sidedata content from the
2836 2842 old revision to the new clone result:
2837 2843
2838 2844 (dropall, filterout, update, new_flags, dropped_flags)
2839 2845
2840 2846 * if `dropall` is True, all sidedata should be dropped
2841 2847 * `filterout` is a set of sidedata keys that should be dropped
2842 2848 * `update` is a mapping of additional/new key -> value
2843 2849 * `new_flags` is a bitfield of new flags that the revision should get
2844 2850 * `dropped_flags` is a bitfield of flags that the revision should no longer have
2845 2851 """
2846 2852 if deltareuse not in self.DELTAREUSEALL:
2847 2853 raise ValueError(
2848 2854 _(b'value for deltareuse invalid: %s') % deltareuse
2849 2855 )
2850 2856
2851 2857 if len(destrevlog):
2852 2858 raise ValueError(_(b'destination revlog is not empty'))
2853 2859
2854 2860 if getattr(self, 'filteredrevs', None):
2855 2861 raise ValueError(_(b'source revlog has filtered revisions'))
2856 2862 if getattr(destrevlog, 'filteredrevs', None):
2857 2863 raise ValueError(_(b'destination revlog has filtered revisions'))
2858 2864
2859 2865 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2860 2866 # if possible.
2861 2867 oldlazydelta = destrevlog._lazydelta
2862 2868 oldlazydeltabase = destrevlog._lazydeltabase
2863 2869 oldamd = destrevlog._deltabothparents
2864 2870
2865 2871 try:
2866 2872 if deltareuse == self.DELTAREUSEALWAYS:
2867 2873 destrevlog._lazydeltabase = True
2868 2874 destrevlog._lazydelta = True
2869 2875 elif deltareuse == self.DELTAREUSESAMEREVS:
2870 2876 destrevlog._lazydeltabase = False
2871 2877 destrevlog._lazydelta = True
2872 2878 elif deltareuse == self.DELTAREUSENEVER:
2873 2879 destrevlog._lazydeltabase = False
2874 2880 destrevlog._lazydelta = False
2875 2881
2876 2882 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2877 2883
2878 2884 self._clone(
2879 2885 tr,
2880 2886 destrevlog,
2881 2887 addrevisioncb,
2882 2888 deltareuse,
2883 2889 forcedeltabothparents,
2884 2890 sidedatacompanion,
2885 2891 )
2886 2892
2887 2893 finally:
2888 2894 destrevlog._lazydelta = oldlazydelta
2889 2895 destrevlog._lazydeltabase = oldlazydeltabase
2890 2896 destrevlog._deltabothparents = oldamd
2891 2897
2892 2898 def _clone(
2893 2899 self,
2894 2900 tr,
2895 2901 destrevlog,
2896 2902 addrevisioncb,
2897 2903 deltareuse,
2898 2904 forcedeltabothparents,
2899 2905 sidedatacompanion,
2900 2906 ):
2901 2907 """perform the core duty of `revlog.clone` after parameter processing"""
2902 2908 deltacomputer = deltautil.deltacomputer(destrevlog)
2903 2909 index = self.index
2904 2910 for rev in self:
2905 2911 entry = index[rev]
2906 2912
2907 2913 # Some classes override linkrev to take filtered revs into
2908 2914 # account. Use raw entry from index.
2909 2915 flags = entry[0] & 0xFFFF
2910 2916 linkrev = entry[4]
2911 2917 p1 = index[entry[5]][7]
2912 2918 p2 = index[entry[6]][7]
2913 2919 node = entry[7]
2914 2920
2915 2921 sidedataactions = (False, [], {}, 0, 0)
2916 2922 if sidedatacompanion is not None:
2917 2923 sidedataactions = sidedatacompanion(self, rev)
2918 2924
2919 2925 # (Possibly) reuse the delta from the revlog if allowed and
2920 2926 # the revlog chunk is a delta.
2921 2927 cachedelta = None
2922 2928 rawtext = None
2923 2929 if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
2924 2930 dropall = sidedataactions[0]
2925 2931 filterout = sidedataactions[1]
2926 2932 update = sidedataactions[2]
2927 2933 new_flags = sidedataactions[3]
2928 2934 dropped_flags = sidedataactions[4]
2929 2935 text, sidedata = self._revisiondata(rev)
2930 2936 if dropall:
2931 2937 sidedata = {}
2932 2938 for key in filterout:
2933 2939 sidedata.pop(key, None)
2934 2940 sidedata.update(update)
2935 2941 if not sidedata:
2936 2942 sidedata = None
2937 2943
2938 2944 flags |= new_flags
2939 2945 flags &= ~dropped_flags
2940 2946
2941 2947 destrevlog.addrevision(
2942 2948 text,
2943 2949 tr,
2944 2950 linkrev,
2945 2951 p1,
2946 2952 p2,
2947 2953 cachedelta=cachedelta,
2948 2954 node=node,
2949 2955 flags=flags,
2950 2956 deltacomputer=deltacomputer,
2951 2957 sidedata=sidedata,
2952 2958 )
2953 2959 else:
2954 2960 if destrevlog._lazydelta:
2955 2961 dp = self.deltaparent(rev)
2956 2962 if dp != nullrev:
2957 2963 cachedelta = (dp, bytes(self._chunk(rev)))
2958 2964
2959 2965 if not cachedelta:
2960 2966 rawtext = self.rawdata(rev)
2961 2967
2962 2968 ifh = destrevlog.opener(
2963 2969 destrevlog.indexfile, b'a+', checkambig=False
2964 2970 )
2965 2971 dfh = None
2966 2972 if not destrevlog._inline:
2967 2973 dfh = destrevlog.opener(destrevlog.datafile, b'a+')
2968 2974 try:
2969 2975 destrevlog._addrevision(
2970 2976 node,
2971 2977 rawtext,
2972 2978 tr,
2973 2979 linkrev,
2974 2980 p1,
2975 2981 p2,
2976 2982 flags,
2977 2983 cachedelta,
2978 2984 ifh,
2979 2985 dfh,
2980 2986 deltacomputer=deltacomputer,
2981 2987 )
2982 2988 finally:
2983 2989 if dfh:
2984 2990 dfh.close()
2985 2991 ifh.close()
2986 2992
2987 2993 if addrevisioncb:
2988 2994 addrevisioncb(self, rev, node)
2989 2995
2990 2996 def censorrevision(self, tr, censornode, tombstone=b''):
2991 2997 if (self.version & 0xFFFF) == REVLOGV0:
2992 2998 raise error.RevlogError(
2993 2999 _(b'cannot censor with version %d revlogs') % self.version
2994 3000 )
2995 3001
2996 3002 censorrev = self.rev(censornode)
2997 3003 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2998 3004
2999 3005 if len(tombstone) > self.rawsize(censorrev):
3000 3006 raise error.Abort(
3001 3007 _(b'censor tombstone must be no longer than censored data')
3002 3008 )
3003 3009
3004 3010 # Rewriting the revlog in place is hard. Our strategy for censoring is
3005 3011 # to create a new revlog, copy all revisions to it, then replace the
3006 3012 # revlogs on transaction close.
3007 3013
3008 3014 newindexfile = self.indexfile + b'.tmpcensored'
3009 3015 newdatafile = self.datafile + b'.tmpcensored'
3010 3016
3011 3017 # This is a bit dangerous. We could easily have a mismatch of state.
3012 3018 newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
3013 3019 newrl.version = self.version
3014 3020 newrl._generaldelta = self._generaldelta
3015 3021 newrl._io = self._io
3016 3022
3017 3023 for rev in self.revs():
3018 3024 node = self.node(rev)
3019 3025 p1, p2 = self.parents(node)
3020 3026
3021 3027 if rev == censorrev:
3022 3028 newrl.addrawrevision(
3023 3029 tombstone,
3024 3030 tr,
3025 3031 self.linkrev(censorrev),
3026 3032 p1,
3027 3033 p2,
3028 3034 censornode,
3029 3035 REVIDX_ISCENSORED,
3030 3036 )
3031 3037
3032 3038 if newrl.deltaparent(rev) != nullrev:
3033 3039 raise error.Abort(
3034 3040 _(
3035 3041 b'censored revision stored as delta; '
3036 3042 b'cannot censor'
3037 3043 ),
3038 3044 hint=_(
3039 3045 b'censoring of revlogs is not '
3040 3046 b'fully implemented; please report '
3041 3047 b'this bug'
3042 3048 ),
3043 3049 )
3044 3050 continue
3045 3051
3046 3052 if self.iscensored(rev):
3047 3053 if self.deltaparent(rev) != nullrev:
3048 3054 raise error.Abort(
3049 3055 _(
3050 3056 b'cannot censor due to censored '
3051 3057 b'revision having delta stored'
3052 3058 )
3053 3059 )
3054 3060 rawtext = self._chunk(rev)
3055 3061 else:
3056 3062 rawtext = self.rawdata(rev)
3057 3063
3058 3064 newrl.addrawrevision(
3059 3065 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3060 3066 )
3061 3067
3062 3068 tr.addbackup(self.indexfile, location=b'store')
3063 3069 if not self._inline:
3064 3070 tr.addbackup(self.datafile, location=b'store')
3065 3071
3066 3072 self.opener.rename(newrl.indexfile, self.indexfile)
3067 3073 if not self._inline:
3068 3074 self.opener.rename(newrl.datafile, self.datafile)
3069 3075
3070 3076 self.clearcaches()
3071 3077 self._loadindex()
3072 3078
3073 3079 def verifyintegrity(self, state):
3074 3080 """Verifies the integrity of the revlog.
3075 3081
3076 3082 Yields ``revlogproblem`` instances describing problems that are
3077 3083 found.
3078 3084 """
3079 3085 dd, di = self.checksize()
3080 3086 if dd:
3081 3087 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3082 3088 if di:
3083 3089 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3084 3090
3085 3091 version = self.version & 0xFFFF
3086 3092
3087 3093 # The verifier tells us what version revlog we should be.
3088 3094 if version != state[b'expectedversion']:
3089 3095 yield revlogproblem(
3090 3096 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3091 3097 % (self.indexfile, version, state[b'expectedversion'])
3092 3098 )
3093 3099
3094 3100 state[b'skipread'] = set()
3095 3101 state[b'safe_renamed'] = set()
3096 3102
3097 3103 for rev in self:
3098 3104 node = self.node(rev)
3099 3105
3100 3106 # Verify contents. 4 cases to care about:
3101 3107 #
3102 3108 # common: the most common case
3103 3109 # rename: with a rename
3104 3110 # meta: file content starts with b'\1\n', the metadata
3105 3111 # header defined in filelog.py, but without a rename
3106 3112 # ext: content stored externally
3107 3113 #
3108 3114 # More formally, their differences are shown below:
3109 3115 #
3110 3116 # | common | rename | meta | ext
3111 3117 # -------------------------------------------------------
3112 3118 # flags() | 0 | 0 | 0 | not 0
3113 3119 # renamed() | False | True | False | ?
3114 3120 # rawtext[0:2]=='\1\n'| False | True | True | ?
3115 3121 #
3116 3122 # "rawtext" means the raw text stored in revlog data, which
3117 3123 # could be retrieved by "rawdata(rev)". "text"
3118 3124 # mentioned below is "revision(rev)".
3119 3125 #
3120 3126 # There are 3 different lengths stored physically:
3121 3127 # 1. L1: rawsize, stored in revlog index
3122 3128 # 2. L2: len(rawtext), stored in revlog data
3123 3129 # 3. L3: len(text), stored in revlog data if flags==0, or
3124 3130 # possibly somewhere else if flags!=0
3125 3131 #
3126 3132 # L1 should be equal to L2. L3 could be different from them.
3127 3133 # "text" may or may not affect commit hash depending on flag
3128 3134 # processors (see flagutil.addflagprocessor).
3129 3135 #
3130 3136 # | common | rename | meta | ext
3131 3137 # -------------------------------------------------
3132 3138 # rawsize() | L1 | L1 | L1 | L1
3133 3139 # size() | L1 | L2-LM | L1(*) | L1 (?)
3134 3140 # len(rawtext) | L2 | L2 | L2 | L2
3135 3141 # len(text) | L2 | L2 | L2 | L3
3136 3142 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3137 3143 #
3138 3144 # LM: length of metadata, depending on rawtext
3139 3145 # (*): not ideal, see comment in filelog.size
3140 3146 # (?): could be "- len(meta)" if the resolved content has
3141 3147 # rename metadata
3142 3148 #
3143 3149 # Checks needed to be done:
3144 3150 # 1. length check: L1 == L2, in all cases.
3145 3151 # 2. hash check: depending on flag processor, we may need to
3146 3152 # use either "text" (external), or "rawtext" (in revlog).
3147 3153
3148 3154 try:
3149 3155 skipflags = state.get(b'skipflags', 0)
3150 3156 if skipflags:
3151 3157 skipflags &= self.flags(rev)
3152 3158
3153 3159 _verify_revision(self, skipflags, state, node)
3154 3160
3155 3161 l1 = self.rawsize(rev)
3156 3162 l2 = len(self.rawdata(node))
3157 3163
3158 3164 if l1 != l2:
3159 3165 yield revlogproblem(
3160 3166 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3161 3167 node=node,
3162 3168 )
3163 3169
3164 3170 except error.CensoredNodeError:
3165 3171 if state[b'erroroncensored']:
3166 3172 yield revlogproblem(
3167 3173 error=_(b'censored file data'), node=node
3168 3174 )
3169 3175 state[b'skipread'].add(node)
3170 3176 except Exception as e:
3171 3177 yield revlogproblem(
3172 3178 error=_(b'unpacking %s: %s')
3173 3179 % (short(node), stringutil.forcebytestr(e)),
3174 3180 node=node,
3175 3181 )
3176 3182 state[b'skipread'].add(node)
3177 3183
3178 3184 def storageinfo(
3179 3185 self,
3180 3186 exclusivefiles=False,
3181 3187 sharedfiles=False,
3182 3188 revisionscount=False,
3183 3189 trackedsize=False,
3184 3190 storedsize=False,
3185 3191 ):
3186 3192 d = {}
3187 3193
3188 3194 if exclusivefiles:
3189 3195 d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
3190 3196 if not self._inline:
3191 3197 d[b'exclusivefiles'].append((self.opener, self.datafile))
3192 3198
3193 3199 if sharedfiles:
3194 3200 d[b'sharedfiles'] = []
3195 3201
3196 3202 if revisionscount:
3197 3203 d[b'revisionscount'] = len(self)
3198 3204
3199 3205 if trackedsize:
3200 3206 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3201 3207
3202 3208 if storedsize:
3203 3209 d[b'storedsize'] = sum(
3204 3210 self.opener.stat(path).st_size for path in self.files()
3205 3211 )
3206 3212
3207 3213 return d
3208 3214
3209 3215 def rewrite_sidedata(self, helpers, startrev, endrev):
3210 3216 if self.version & 0xFFFF != REVLOGV2:
3211 3217 return
3212 3218 # inline are not yet supported because they suffer from an issue when
3213 3219 # rewriting them (since it's not an append-only operation).
3214 3220 # See issue6485.
3215 3221 assert not self._inline
3216 3222 if not helpers[1] and not helpers[2]:
3217 3223 # Nothing to generate or remove
3218 3224 return
3219 3225
3220 3226 new_entries = []
3221 3227 # append the new sidedata
3222 3228 with self._datafp(b'a+') as fp:
3223 3229 # Maybe this bug still exists, see revlog._writeentry
3224 3230 fp.seek(0, os.SEEK_END)
3225 3231 current_offset = fp.tell()
3226 3232 for rev in range(startrev, endrev + 1):
3227 3233 entry = self.index[rev]
3228 3234 new_sidedata = storageutil.run_sidedata_helpers(
3229 3235 store=self,
3230 3236 sidedata_helpers=helpers,
3231 3237 sidedata={},
3232 3238 rev=rev,
3233 3239 )
3234 3240
3235 3241 serialized_sidedata = sidedatautil.serialize_sidedata(
3236 3242 new_sidedata
3237 3243 )
3238 3244 if entry[8] != 0 or entry[9] != 0:
3239 3245 # rewriting entries that already have sidedata is not
3240 3246 # supported yet, because it introduces garbage data in the
3241 3247 # revlog.
3242 3248 msg = b"Rewriting existing sidedata is not supported yet"
3243 3249 raise error.Abort(msg)
3244 3250 entry = entry[:8]
3245 3251 entry += (current_offset, len(serialized_sidedata))
3246 3252
3247 3253 fp.write(serialized_sidedata)
3248 3254 new_entries.append(entry)
3249 3255 current_offset += len(serialized_sidedata)
3250 3256
3251 3257 # rewrite the new index entries
3252 3258 with self._indexfp(b'w+') as fp:
3253 3259 fp.seek(startrev * self._io.size)
3254 3260 for i, entry in enumerate(new_entries):
3255 3261 rev = startrev + i
3256 3262 self.index.replace_sidedata_info(rev, entry[8], entry[9])
3257 3263 packed = self._io.packentry(entry, self.node, self.version, rev)
3258 3264 fp.write(packed)
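To make the append-then-reindex flow of rewrite_sidedata above easier to
follow in isolation, here is a minimal standalone sketch. Every name in it is
a hypothetical stand-in (two-field index records, a plain struct layout); it
is not the revlog's actual on-disk format or API.

    import io
    import os
    import struct

    # Hypothetical index record: (sidedata offset, sidedata length).
    INDEX_RECORD = struct.Struct('>QQ')

    def append_sidedata(data_fp, index_fp, entries, blobs, startrev):
        """Append each sidedata blob to the data file, then rewrite the
        affected index records with their new (offset, length) pairs."""
        data_fp.seek(0, os.SEEK_END)
        offset = data_fp.tell()
        rewritten = []
        for entry, blob in zip(entries, blobs):
            if entry != (0, 0):
                # mirror the guard above: entries that already carry
                # sidedata are not rewritten
                raise ValueError('rewriting existing sidedata unsupported')
            data_fp.write(blob)
            rewritten.append((offset, len(blob)))
            offset += len(blob)
        # second pass: overwrite the affected index records in place
        index_fp.seek(startrev * INDEX_RECORD.size)
        for rec in rewritten:
            index_fp.write(INDEX_RECORD.pack(*rec))
        return rewritten

    # Usage with in-memory stand-ins for the two files:
    data, index = io.BytesIO(), io.BytesIO()
    append_sidedata(data, index, [(0, 0)], [b'sidedata-blob'], startrev=0)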
@@ -1,38 +1,45 b''
1 1 == New Features ==
2 2
3 3 * `hg purge` is now a core command using `--confirm` by default.
4 4
5 5 * The `rev-branch-cache` is now updated incrementally whenever changesets
6 6 are added.
7 7
8 8 * The new options `experimental.bundlecompthreads` and
9 9 `experimental.bundlecompthreads.<engine>` can be used to instruct
10 10 the compression engines for bundle operations to use multiple threads
11 11 for compression. The default is single-threaded operation. Currently
12 12 only supported for zstd.
13 13
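A hedged illustration of the options named above (the thread count is
arbitrary; per the entry, only zstd currently honors it):

    [experimental]
    bundlecompthreads = 4
    bundlecompthreads.zstd = 4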
14 14 == New Experimental Features ==
15 15
16 16 * There's a new `diff.merge` config option to show the changes
17 17 relative to an automerge for merge changesets. This makes it
18 18 easier to detect and review manual changes performed in merge
19 19 changesets. It is supported by `hg diff --change`, `hg log -p`,
20 20 `hg incoming -p`, and `hg outgoing -p` so far.
21 21
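A hedged usage sketch (assuming the option takes a boolean value, which the
entry implies but does not state):

    $ hg log -p --config diff.merge=yes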
22 22
23 23 == Bug Fixes ==
24 24
25 25
26 26
27 27 == Backwards Compatibility Changes ==
28 28
29 * In normal repositories, the first parent of a changeset is not null,
30 unless both parents are null (like the first changeset). Some legacy
31 repositories violate this condition. The revlog code will now
32 silently swap the parents if this condition is detected. This can
33 change the output of `hg log` when explicitly asking for first or
34 second parent.
35
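The normalization described above is simple to state; a minimal sketch (a
hypothetical helper, not the actual revlog code path):

    nullid = b'\0' * 20  # the null node id

    def normalized_parents(p1, p2):
        # p1 may be null only when both parents are null: if only the
        # second parent is set, swap so the non-null parent comes first.
        if p1 == nullid and p2 != nullid:
            return p2, p1
        return p1, p2

The single changed line in the narrow test further down (`...2a20009de83e`
now reporting `3ac1f5779de3` as its first parent) shows this swap in action.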
29 36
30 37 == Internal API Changes ==
31 38
32 39 * `changelog.branchinfo` is deprecated and will be removed after 5.8.
33 40 It is superseded by `changelogrevision.branchinfo`.
34 41
35 42 * Callbacks for revlog.addgroup and the changelog._nodeduplicatecallback hook
36 43 now get a revision number as argument instead of a node.
37 44
38 45 * revlog.addrevision returns the revision number instead of the node.
@@ -1,347 +1,347 b''
1 1 #require no-reposimplestore
2 2
3 3 $ . "$TESTDIR/narrow-library.sh"
4 4
5 5 create full repo
6 6
7 7 $ hg init master
8 8 $ cd master
9 9 $ cat >> .hg/hgrc <<EOF
10 10 > [narrow]
11 11 > serveellipses=True
12 12 > EOF
13 13
14 14 $ mkdir inside
15 15 $ echo 1 > inside/f
16 16 $ hg commit -Aqm 'initial inside'
17 17
18 18 $ mkdir outside
19 19 $ echo 1 > outside/f
20 20 $ hg commit -Aqm 'initial outside'
21 21
22 22 $ echo 2a > outside/f
23 23 $ hg commit -Aqm 'outside 2a'
24 24 $ echo 3 > inside/f
25 25 $ hg commit -Aqm 'inside 3'
26 26 $ echo 4a > outside/f
27 27 $ hg commit -Aqm 'outside 4a'
28 28 $ hg update '.~3'
29 29 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 $ echo 2b > outside/f
32 32 $ hg commit -Aqm 'outside 2b'
33 33 $ echo 3 > inside/f
34 34 $ hg commit -Aqm 'inside 3'
35 35 $ echo 4b > outside/f
36 36 $ hg commit -Aqm 'outside 4b'
37 37 $ hg update '.~3'
38 38 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
39 39
40 40 $ echo 2c > outside/f
41 41 $ hg commit -Aqm 'outside 2c'
42 42 $ echo 3 > inside/f
43 43 $ hg commit -Aqm 'inside 3'
44 44 $ echo 4c > outside/f
45 45 $ hg commit -Aqm 'outside 4c'
46 46 $ hg update '.~3'
47 47 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 48
49 49 $ echo 2d > outside/f
50 50 $ hg commit -Aqm 'outside 2d'
51 51 $ echo 3 > inside/f
52 52 $ hg commit -Aqm 'inside 3'
53 53 $ echo 4d > outside/f
54 54 $ hg commit -Aqm 'outside 4d'
55 55
56 56 $ hg update -r 'desc("outside 4a")'
57 57 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 58 $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)'
59 59 merging outside/f
60 60 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
61 61 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
62 62 $ echo 5 > outside/f
63 63 $ rm outside/f.orig
64 64 $ hg resolve --mark outside/f
65 65 (no more unresolved files)
66 66 $ hg commit -m 'merge a/b 5'
67 67 $ echo 6 > outside/f
68 68 $ hg commit -Aqm 'outside 6'
69 69
70 70 $ hg merge -r 'desc("outside 4c")' 2>&1 | egrep -v '(warning:|incomplete!)'
71 71 merging outside/f
72 72 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
73 73 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
74 74 $ echo 7 > outside/f
75 75 $ rm outside/f.orig
76 76 $ hg resolve --mark outside/f
77 77 (no more unresolved files)
78 78 $ hg commit -Aqm 'merge a/b/c 7'
79 79 $ echo 8 > outside/f
80 80 $ hg commit -Aqm 'outside 8'
81 81
82 82 $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)'
83 83 merging outside/f
84 84 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
85 85 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
86 86 $ echo 9 > outside/f
87 87 $ rm outside/f.orig
88 88 $ hg resolve --mark outside/f
89 89 (no more unresolved files)
90 90 $ hg commit -Aqm 'merge a/b/c/d 9'
91 91 $ echo 10 > outside/f
92 92 $ hg commit -Aqm 'outside 10'
93 93
94 94 $ echo 11 > inside/f
95 95 $ hg commit -Aqm 'inside 11'
96 96 $ echo 12 > outside/f
97 97 $ hg commit -Aqm 'outside 12'
98 98
99 99 $ hg log -G -T '{rev} {node|short} {desc}\n'
100 100 @ 21 8d874d57adea outside 12
101 101 |
102 102 o 20 7ef88b4dd4fa inside 11
103 103 |
104 104 o 19 2a20009de83e outside 10
105 105 |
106 106 o 18 3ac1f5779de3 merge a/b/c/d 9
107 107 |\
108 108 | o 17 38a9c2f7e546 outside 8
109 109 | |
110 110 | o 16 094aa62fc898 merge a/b/c 7
111 111 | |\
112 112 | | o 15 f29d083d32e4 outside 6
113 113 | | |
114 114 | | o 14 2dc11382541d merge a/b 5
115 115 | | |\
116 116 o | | | 13 27d07ef97221 outside 4d
117 117 | | | |
118 118 o | | | 12 465567bdfb2d inside 3
119 119 | | | |
120 120 o | | | 11 d1c61993ec83 outside 2d
121 121 | | | |
122 122 | o | | 10 56859a8e33b9 outside 4c
123 123 | | | |
124 124 | o | | 9 bb96a08b062a inside 3
125 125 | | | |
126 126 | o | | 8 b844052e7b3b outside 2c
127 127 |/ / /
128 128 | | o 7 9db2d8fcc2a6 outside 4b
129 129 | | |
130 130 | | o 6 6418167787a6 inside 3
131 131 | | |
132 132 +---o 5 77344f344d83 outside 2b
133 133 | |
134 134 | o 4 9cadde08dc9f outside 4a
135 135 | |
136 136 | o 3 019ef06f125b inside 3
137 137 | |
138 138 | o 2 75e40c075a19 outside 2a
139 139 |/
140 140 o 1 906d6c682641 initial outside
141 141 |
142 142 o 0 9f8e82b51004 initial inside
143 143
144 144
145 145 Now narrow and shallow clone this and get a hopefully correct graph
146 146
147 147 $ cd ..
148 148 $ hg clone --narrow ssh://user@dummy/master narrow --include inside --depth 7
149 149 requesting all changes
150 150 adding changesets
151 151 adding manifests
152 152 adding file changes
153 153 added 8 changesets with 3 changes to 1 files
154 154 new changesets *:* (glob)
155 155 updating to branch default
156 156 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
157 157 $ cd narrow
158 158
159 159 To make updating the tests easier, we print the emitted nodes
160 160 sorted. This makes it easier to identify when the same node structure
161 161 has been emitted, just in a different order.
162 162
163 163 $ hg log -G -T '{rev} {node|short}{if(ellipsis,"...")} {desc}\n'
164 164 @ 7 8d874d57adea... outside 12
165 165 |
166 166 o 6 7ef88b4dd4fa inside 11
167 167 |
168 168 o 5 2a20009de83e... outside 10
169 169 |
170 170 o 4 3ac1f5779de3... merge a/b/c/d 9
171 171 |\
172 172 | o 3 465567bdfb2d inside 3
173 173 | |
174 174 | o 2 d1c61993ec83... outside 2d
175 175 |
176 176 o 1 bb96a08b062a inside 3
177 177 |
178 178 o 0 b844052e7b3b... outside 2c
179 179
180 180
181 181 $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
182 ...2a20009de83e 000000000000 3ac1f5779de3 outside 10
182 ...2a20009de83e 3ac1f5779de3 000000000000 outside 10
183 183 ...3ac1f5779de3 bb96a08b062a 465567bdfb2d merge a/b/c/d 9
184 184 ...8d874d57adea 7ef88b4dd4fa 000000000000 outside 12
185 185 ...b844052e7b3b 000000000000 000000000000 outside 2c
186 186 ...d1c61993ec83 000000000000 000000000000 outside 2d
187 187 465567bdfb2d d1c61993ec83 000000000000 inside 3
188 188 7ef88b4dd4fa 2a20009de83e 000000000000 inside 11
189 189 bb96a08b062a b844052e7b3b 000000000000 inside 3
190 190
191 191 $ cd ..
192 192
193 193 Incremental test case: show a pull can pull in a conflicted merge even if elided
194 194
195 195 $ hg init pullmaster
196 196 $ cd pullmaster
197 197 $ cat >> .hg/hgrc <<EOF
198 198 > [narrow]
199 199 > serveellipses=True
200 200 > EOF
201 201 $ mkdir inside outside
202 202 $ echo v1 > inside/f
203 203 $ echo v1 > outside/f
204 204 $ hg add inside/f outside/f
205 205 $ hg commit -m init
206 206
207 207 $ for line in a b c d
208 208 > do
209 209 > hg update -r 0
210 210 > echo v2$line > outside/f
211 211 > hg commit -m "outside 2$line"
212 212 > echo v2$line > inside/f
213 213 > hg commit -m "inside 2$line"
214 214 > echo v3$line > outside/f
215 215 > hg commit -m "outside 3$line"
216 216 > echo v4$line > outside/f
217 217 > hg commit -m "outside 4$line"
218 218 > done
219 219 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
220 220 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
221 221 created new head
222 222 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
223 223 created new head
224 224 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
225 225 created new head
226 226
227 227 $ cd ..
228 228 $ hg clone --narrow ssh://user@dummy/pullmaster pullshallow \
229 229 > --include inside --depth 3
230 230 requesting all changes
231 231 adding changesets
232 232 adding manifests
233 233 adding file changes
234 234 added 12 changesets with 5 changes to 1 files (+3 heads)
235 235 new changesets *:* (glob)
236 236 updating to branch default
237 237 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
238 238 $ cd pullshallow
239 239
240 240 $ hg log -G -T '{rev} {node|short}{if(ellipsis,"...")} {desc}\n'
241 241 @ 11 0ebbd712a0c8... outside 4d
242 242 |
243 243 o 10 0d4c867aeb23 inside 2d
244 244 |
245 245 o 9 e932969c3961... outside 2d
246 246
247 247 o 8 33d530345455... outside 4c
248 248 |
249 249 o 7 0ce6481bfe07 inside 2c
250 250 |
251 251 o 6 caa65c940632... outside 2c
252 252
253 253 o 5 3df233defecc... outside 4b
254 254 |
255 255 o 4 7162cc6d11a4 inside 2b
256 256 |
257 257 o 3 f2a632f0082d... outside 2b
258 258
259 259 o 2 b8a3da16ba49... outside 4a
260 260 |
261 261 o 1 53f543eb8e45 inside 2a
262 262 |
263 263 o 0 1be3e5221c6a... outside 2a
264 264
265 265 $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
266 266 ...0ebbd712a0c8 0d4c867aeb23 000000000000 outside 4d
267 267 ...1be3e5221c6a 000000000000 000000000000 outside 2a
268 268 ...33d530345455 0ce6481bfe07 000000000000 outside 4c
269 269 ...3df233defecc 7162cc6d11a4 000000000000 outside 4b
270 270 ...b8a3da16ba49 53f543eb8e45 000000000000 outside 4a
271 271 ...caa65c940632 000000000000 000000000000 outside 2c
272 272 ...e932969c3961 000000000000 000000000000 outside 2d
273 273 ...f2a632f0082d 000000000000 000000000000 outside 2b
274 274 0ce6481bfe07 caa65c940632 000000000000 inside 2c
275 275 0d4c867aeb23 e932969c3961 000000000000 inside 2d
276 276 53f543eb8e45 1be3e5221c6a 000000000000 inside 2a
277 277 7162cc6d11a4 f2a632f0082d 000000000000 inside 2b
278 278
279 279 $ cd ../pullmaster
280 280 $ hg update -r 'desc("outside 4a")'
281 281 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
282 282 $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)'
283 283 merging inside/f
284 284 merging outside/f
285 285 0 files updated, 0 files merged, 0 files removed, 2 files unresolved
286 286 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
287 287 $ echo 3 > inside/f
288 288 $ echo 5 > outside/f
289 289 $ rm -f {in,out}side/f.orig
290 290 $ hg resolve --mark inside/f outside/f
291 291 (no more unresolved files)
292 292 $ hg commit -m 'merge a/b 5'
293 293
294 294 $ hg update -r 'desc("outside 4c")'
295 295 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
296 296 $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)'
297 297 merging inside/f
298 298 merging outside/f
299 299 0 files updated, 0 files merged, 0 files removed, 2 files unresolved
300 300 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
301 301 $ echo 3 > inside/f
302 302 $ echo 5 > outside/f
303 303 $ rm -f {in,out}side/f.orig
304 304 $ hg resolve --mark inside/f outside/f
305 305 (no more unresolved files)
306 306 $ hg commit -m 'merge c/d 5'
307 307
308 308 $ hg update -r 'desc("merge a/b 5")'
309 309 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
310 310 $ hg merge -r 'desc("merge c/d 5")'
311 311 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
312 312 (branch merge, don't forget to commit)
313 313 $ echo 6 > outside/f
314 314 $ hg commit -m 'outside 6'
315 315 $ echo 7 > outside/f
316 316 $ hg commit -m 'outside 7'
317 317 $ echo 8 > outside/f
318 318 $ hg commit -m 'outside 8'
319 319
320 320 $ cd ../pullshallow
321 321 $ hg pull --depth 3
322 322 pulling from ssh://user@dummy/pullmaster
323 323 searching for changes
324 324 adding changesets
325 325 adding manifests
326 326 adding file changes
327 327 added 4 changesets with 3 changes to 1 files (-3 heads)
328 328 new changesets *:* (glob)
329 329 (run 'hg update' to get a working copy)
330 330
331 331 $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
332 332 ...0ebbd712a0c8 0d4c867aeb23 000000000000 outside 4d
333 333 ...1be3e5221c6a 000000000000 000000000000 outside 2a
334 334 ...33d530345455 0ce6481bfe07 000000000000 outside 4c
335 335 ...3df233defecc 7162cc6d11a4 000000000000 outside 4b
336 336 ...b8a3da16ba49 53f543eb8e45 000000000000 outside 4a
337 337 ...bf545653453e 968003d40c60 000000000000 outside 8
338 338 ...caa65c940632 000000000000 000000000000 outside 2c
339 339 ...e932969c3961 000000000000 000000000000 outside 2d
340 340 ...f2a632f0082d 000000000000 000000000000 outside 2b
341 341 0ce6481bfe07 caa65c940632 000000000000 inside 2c
342 342 0d4c867aeb23 e932969c3961 000000000000 inside 2d
343 343 53f543eb8e45 1be3e5221c6a 000000000000 inside 2a
344 344 67d49c0bdbda b8a3da16ba49 3df233defecc merge a/b 5
345 345 7162cc6d11a4 f2a632f0082d 000000000000 inside 2b
346 346 968003d40c60 67d49c0bdbda e867021d52c2 outside 6
347 347 e867021d52c2 33d530345455 0ebbd712a0c8 merge c/d 5