corruption: backout changeset 49fd21f32695 (issue6528)...
marmoute
r48740:411dc27f | 5.8.1 stable
@@ -1,3242 +1,3236 @@
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import binascii
17 17 import collections
18 18 import contextlib
19 19 import errno
20 20 import io
21 21 import os
22 22 import struct
23 23 import zlib
24 24
25 25 # import stuff from node for others to import from revlog
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullhex,
30 30 nullid,
31 31 nullrev,
32 32 sha1nodeconstants,
33 33 short,
34 34 wdirfilenodeids,
35 35 wdirhex,
36 36 wdirid,
37 37 wdirrev,
38 38 )
39 39 from .i18n import _
40 40 from .pycompat import getattr
41 41 from .revlogutils.constants import (
42 42 FLAG_GENERALDELTA,
43 43 FLAG_INLINE_DATA,
44 44 INDEX_ENTRY_V0,
45 45 INDEX_ENTRY_V1,
46 46 INDEX_ENTRY_V2,
47 47 INDEX_HEADER,
48 48 REVLOGV0,
49 49 REVLOGV1,
50 50 REVLOGV1_FLAGS,
51 51 REVLOGV2,
52 52 REVLOGV2_FLAGS,
53 53 REVLOG_DEFAULT_FLAGS,
54 54 REVLOG_DEFAULT_FORMAT,
55 55 REVLOG_DEFAULT_VERSION,
56 56 )
57 57 from .revlogutils.flagutil import (
58 58 REVIDX_DEFAULT_FLAGS,
59 59 REVIDX_ELLIPSIS,
60 60 REVIDX_EXTSTORED,
61 61 REVIDX_FLAGS_ORDER,
62 62 REVIDX_HASCOPIESINFO,
63 63 REVIDX_ISCENSORED,
64 64 REVIDX_RAWTEXT_CHANGING_FLAGS,
65 65 REVIDX_SIDEDATA,
66 66 )
67 67 from .thirdparty import attr
68 68 from . import (
69 69 ancestor,
70 70 dagop,
71 71 error,
72 72 mdiff,
73 73 policy,
74 74 pycompat,
75 75 templatefilters,
76 76 util,
77 77 )
78 78 from .interfaces import (
79 79 repository,
80 80 util as interfaceutil,
81 81 )
82 82 from .revlogutils import (
83 83 deltas as deltautil,
84 84 flagutil,
85 85 nodemap as nodemaputil,
86 86 sidedata as sidedatautil,
87 87 )
88 88 from .utils import (
89 89 storageutil,
90 90 stringutil,
91 91 )
92 92
93 93 # blanket usage of all the names to prevent pyflakes complaints
94 94 # We need these names available in the module for extensions.
95 95 REVLOGV0
96 96 REVLOGV1
97 97 REVLOGV2
98 98 FLAG_INLINE_DATA
99 99 FLAG_GENERALDELTA
100 100 REVLOG_DEFAULT_FLAGS
101 101 REVLOG_DEFAULT_FORMAT
102 102 REVLOG_DEFAULT_VERSION
103 103 REVLOGV1_FLAGS
104 104 REVLOGV2_FLAGS
105 105 REVIDX_ISCENSORED
106 106 REVIDX_ELLIPSIS
107 107 REVIDX_SIDEDATA
108 108 REVIDX_HASCOPIESINFO
109 109 REVIDX_EXTSTORED
110 110 REVIDX_DEFAULT_FLAGS
111 111 REVIDX_FLAGS_ORDER
112 112 REVIDX_RAWTEXT_CHANGING_FLAGS
113 113
114 114 parsers = policy.importmod('parsers')
115 115 rustancestor = policy.importrust('ancestor')
116 116 rustdagop = policy.importrust('dagop')
117 117 rustrevlog = policy.importrust('revlog')
118 118
119 119 # Aliased for performance.
120 120 _zlibdecompress = zlib.decompress
121 121
122 122 # max size of revlog with inline data
123 123 _maxinline = 131072
124 124 _chunksize = 1048576
125 125
126 126 # Flag processors for REVIDX_ELLIPSIS.
127 127 def ellipsisreadprocessor(rl, text):
128 128 return text, False
129 129
130 130
131 131 def ellipsiswriteprocessor(rl, text):
132 132 return text, False
133 133
134 134
135 135 def ellipsisrawprocessor(rl, text):
136 136 return False
137 137
138 138
139 139 ellipsisprocessor = (
140 140 ellipsisreadprocessor,
141 141 ellipsiswriteprocessor,
142 142 ellipsisrawprocessor,
143 143 )
144 144
145 145
146 146 def getoffset(q):
147 147 return int(q >> 16)
148 148
149 149
150 150 def gettype(q):
151 151 return int(q & 0xFFFF)
152 152
153 153
154 154 def offset_type(offset, type):
155 155 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
156 156 raise ValueError(b'unknown revlog index flags')
157 157 return int(int(offset) << 16 | type)
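
A quick worked sketch of the packing implemented by getoffset/gettype/offset_type above (standalone, hypothetical values): the first field of an index entry keeps the data offset in the high bits and the 16 flag bits in the low word.

    def pack(offset, type):
        assert (type & ~0xFFFF) == 0       # flags must fit in 16 bits
        return (offset << 16) | type

    def unpack(q):
        return q >> 16, q & 0xFFFF

    packed = pack(1048576, 0x0002)         # offset 1 MiB, one flag bit set
    assert unpack(packed) == (1048576, 0x0002)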
158 158
159 159
160 160 def _verify_revision(rl, skipflags, state, node):
161 161 """Verify the integrity of the given revlog ``node`` while providing a hook
162 162 point for extensions to influence the operation."""
163 163 if skipflags:
164 164 state[b'skipread'].add(node)
165 165 else:
166 166 # Side-effect: read content and verify hash.
167 167 rl.revision(node)
168 168
169 169
170 170 # True if a fast implementation for persistent-nodemap is available
171 171 #
172 172 # We also consider the "pure" python implementation to be "fast", because
173 173 # people using pure don't really have performance considerations (and a
174 174 # wheelbarrow of other slowness sources)
175 175 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
176 176 parsers, 'BaseIndexObject'
177 177 )
178 178
179 179
180 180 @attr.s(slots=True, frozen=True)
181 181 class _revisioninfo(object):
182 182 """Information about a revision that allows building its fulltext
183 183 node: expected hash of the revision
184 184 p1, p2: parent revs of the revision
185 185 btext: built text cache consisting of a one-element list
186 186 cachedelta: (baserev, uncompressed_delta) or None
187 187 flags: flags associated to the revision storage
188 188
189 189 One of btext[0] or cachedelta must be set.
190 190 """
191 191
192 192 node = attr.ib()
193 193 p1 = attr.ib()
194 194 p2 = attr.ib()
195 195 btext = attr.ib()
196 196 textlen = attr.ib()
197 197 cachedelta = attr.ib()
198 198 flags = attr.ib()
199 199
200 200
201 201 @interfaceutil.implementer(repository.irevisiondelta)
202 202 @attr.s(slots=True)
203 203 class revlogrevisiondelta(object):
204 204 node = attr.ib()
205 205 p1node = attr.ib()
206 206 p2node = attr.ib()
207 207 basenode = attr.ib()
208 208 flags = attr.ib()
209 209 baserevisionsize = attr.ib()
210 210 revision = attr.ib()
211 211 delta = attr.ib()
212 212 sidedata = attr.ib()
213 213 linknode = attr.ib(default=None)
214 214
215 215
216 216 @interfaceutil.implementer(repository.iverifyproblem)
217 217 @attr.s(frozen=True)
218 218 class revlogproblem(object):
219 219 warning = attr.ib(default=None)
220 220 error = attr.ib(default=None)
221 221 node = attr.ib(default=None)
222 222
223 223
224 224 class revlogoldindex(list):
225 225 entry_size = INDEX_ENTRY_V0.size
226 226
227 227 @property
228 228 def nodemap(self):
229 229 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
230 230 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
231 231 return self._nodemap
232 232
233 233 @util.propertycache
234 234 def _nodemap(self):
235 235 nodemap = nodemaputil.NodeMap({nullid: nullrev})
236 236 for r in range(0, len(self)):
237 237 n = self[r][7]
238 238 nodemap[n] = r
239 239 return nodemap
240 240
241 241 def has_node(self, node):
242 242 """return True if the node exist in the index"""
243 243 return node in self._nodemap
244 244
245 245 def rev(self, node):
246 246 """return a revision for a node
247 247
248 248 If the node is unknown, raise a RevlogError"""
249 249 return self._nodemap[node]
250 250
251 251 def get_rev(self, node):
252 252 """return a revision for a node
253 253
254 254 If the node is unknown, return None"""
255 255 return self._nodemap.get(node)
256 256
257 257 def append(self, tup):
258 258 self._nodemap[tup[7]] = len(self)
259 259 super(revlogoldindex, self).append(tup)
260 260
261 261 def __delitem__(self, i):
262 262 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
263 263 raise ValueError(b"deleting slices only supports a:-1 with step 1")
264 264 for r in pycompat.xrange(i.start, len(self)):
265 265 del self._nodemap[self[r][7]]
266 266 super(revlogoldindex, self).__delitem__(i)
267 267
268 268 def clearcaches(self):
269 269 self.__dict__.pop('_nodemap', None)
270 270
271 271 def __getitem__(self, i):
272 272 if i == -1:
273 273 return (0, 0, 0, -1, -1, -1, -1, nullid)
274 274 return list.__getitem__(self, i)
275 275
276 276
277 277 class revlogoldio(object):
278 278 def parseindex(self, data, inline):
279 279 s = INDEX_ENTRY_V0.size
280 280 index = []
281 281 nodemap = nodemaputil.NodeMap({nullid: nullrev})
282 282 n = off = 0
283 283 l = len(data)
284 284 while off + s <= l:
285 285 cur = data[off : off + s]
286 286 off += s
287 287 e = INDEX_ENTRY_V0.unpack(cur)
288 288 # transform to revlogv1 format
289 289 e2 = (
290 290 offset_type(e[0], 0),
291 291 e[1],
292 292 -1,
293 293 e[2],
294 294 e[3],
295 295 nodemap.get(e[4], nullrev),
296 296 nodemap.get(e[5], nullrev),
297 297 e[6],
298 298 )
299 299 index.append(e2)
300 300 nodemap[e[6]] = n
301 301 n += 1
302 302
303 303 index = revlogoldindex(index)
304 304 return index, None
305 305
306 306 def packentry(self, entry, node, version, rev):
307 307 """return the binary representation of an entry
308 308
309 309 entry: a tuple containing all the values (see index.__getitem__)
310 310 node: a callback to convert a revision to nodeid
311 311 version: the changelog version
312 312 rev: the revision number
313 313 """
314 314 if gettype(entry[0]):
315 315 raise error.RevlogError(
316 316 _(b'index entry flags need revlog version 1')
317 317 )
318 318 e2 = (
319 319 getoffset(entry[0]),
320 320 entry[1],
321 321 entry[3],
322 322 entry[4],
323 323 node(entry[5]),
324 324 node(entry[6]),
325 325 entry[7],
326 326 )
327 327 return INDEX_ENTRY_V0.pack(*e2)
328 328
329 329
330 330 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
331 331 # signed integer)
332 332 _maxentrysize = 0x7FFFFFFF
333 333
334 334
335 335 class revlogio(object):
336 336 def parseindex(self, data, inline):
337 337 # call the C implementation to parse the index data
338 338 index, cache = parsers.parse_index2(data, inline)
339 339 return index, cache
340 340
341 341 def packentry(self, entry, node, version, rev):
342 342 p = INDEX_ENTRY_V1.pack(*entry)
343 343 if rev == 0:
344 344 p = INDEX_HEADER.pack(version) + p[4:]
345 345 return p
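
Worth noting from packentry above: a revlog file has no separate header block; the 4-byte version word is overlaid on the start of rev 0's index entry. That is safe because rev 0's entry begins with its packed offset (always 0) and flags, whose top four bytes are therefore zero. A minimal sketch of that invariant (assuming the v1 ">Q" first field):

    import struct

    # first 8 bytes of rev 0's entry: (offset << 16) | flags, offset is 0
    first8 = struct.pack(b'>Q', (0 << 16) | 0x0001)
    assert first8[:4] == b'\x00\x00\x00\x00'  # free to hold the version word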
346 346
347 347
348 348 class revlogv2io(object):
349 349 def parseindex(self, data, inline):
350 350 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
351 351 return index, cache
352 352
353 353 def packentry(self, entry, node, version, rev):
354 354 p = INDEX_ENTRY_V2.pack(*entry)
355 355 if rev == 0:
356 356 p = INDEX_HEADER.pack(version) + p[4:]
357 357 return p
358 358
359 359
360 360 NodemapRevlogIO = None
361 361
362 362 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
363 363
364 364 class NodemapRevlogIO(revlogio):
365 365 """A debug oriented IO class that return a PersistentNodeMapIndexObject
366 366
367 367 The PersistentNodeMapIndexObject is meant for testing the persistent nodemap feature.
368 368 """
369 369
370 370 def parseindex(self, data, inline):
371 371 index, cache = parsers.parse_index_devel_nodemap(data, inline)
372 372 return index, cache
373 373
374 374
375 375 class rustrevlogio(revlogio):
376 376 def parseindex(self, data, inline):
377 377 index, cache = super(rustrevlogio, self).parseindex(data, inline)
378 378 return rustrevlog.MixedIndex(index), cache
379 379
380 380
381 381 class revlog(object):
382 382 """
383 383 the underlying revision storage object
384 384
385 385 A revlog consists of two parts, an index and the revision data.
386 386
387 387 The index is a file with a fixed record size containing
388 388 information on each revision, including its nodeid (hash), the
389 389 nodeids of its parents, the position and offset of its data within
390 390 the data file, and the revision it's based on. Finally, each entry
391 391 contains a linkrev entry that can serve as a pointer to external
392 392 data.
393 393
394 394 The revision data itself is a linear collection of data chunks.
395 395 Each chunk represents a revision and is usually represented as a
396 396 delta against the previous chunk. To bound lookup time, runs of
397 397 deltas are limited to about 2 times the length of the original
398 398 version data. This makes retrieval of a version proportional to
399 399 its size, or O(1) relative to the number of revisions.
400 400
401 401 Both pieces of the revlog are written to in an append-only
402 402 fashion, which means we never need to rewrite a file to insert or
403 403 remove data, and can use some simple techniques to avoid the need
404 404 for locking while reading.
405 405
406 406 If checkambig, indexfile is opened with checkambig=True at
407 407 writing, to avoid file stat ambiguity.
408 408
409 409 If mmaplargeindex is True, and an mmapindexthreshold is set, the
410 410 index will be mmapped rather than read if it is larger than the
411 411 configured threshold.
412 412
413 413 If censorable is True, the revlog can have censored revisions.
414 414
415 415 If `upperboundcomp` is not None, this is the expected maximal gain from
416 416 compression for the data content.
417 417
418 418 `concurrencychecker` is an optional function that receives 3 arguments: a
419 419 file handle, a filename, and an expected position. It should check whether
420 420 the current position in the file handle is valid, and log/warn/fail (by
421 421 raising).
422 422 """
423 423
424 424 _flagserrorclass = error.RevlogError
425 425
426 426 def __init__(
427 427 self,
428 428 opener,
429 429 indexfile,
430 430 datafile=None,
431 431 checkambig=False,
432 432 mmaplargeindex=False,
433 433 censorable=False,
434 434 upperboundcomp=None,
435 435 persistentnodemap=False,
436 436 concurrencychecker=None,
437 437 ):
438 438 """
439 439 create a revlog object
440 440
441 441 opener is a function that abstracts the file opening operation
442 442 and can be used to implement COW semantics or the like.
443 443
444 444 """
445 445 self.upperboundcomp = upperboundcomp
446 446 self.indexfile = indexfile
447 447 self.datafile = datafile or (indexfile[:-2] + b".d")
448 448 self.nodemap_file = None
449 449 if persistentnodemap:
450 450 self.nodemap_file = nodemaputil.get_nodemap_file(
451 451 opener, self.indexfile
452 452 )
453 453
454 454 self.opener = opener
455 455 # When True, indexfile is opened with checkambig=True at writing, to
456 456 # avoid file stat ambiguity.
457 457 self._checkambig = checkambig
458 458 self._mmaplargeindex = mmaplargeindex
459 459 self._censorable = censorable
460 460 # 3-tuple of (node, rev, text) for a raw revision.
461 461 self._revisioncache = None
462 462 # Maps rev to chain base rev.
463 463 self._chainbasecache = util.lrucachedict(100)
464 464 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
465 465 self._chunkcache = (0, b'')
466 466 # How much data to read and cache into the raw revlog data cache.
467 467 self._chunkcachesize = 65536
468 468 self._maxchainlen = None
469 469 self._deltabothparents = True
470 470 self.index = None
471 471 self._nodemap_docket = None
472 472 # Mapping of partial identifiers to full nodes.
473 473 self._pcache = {}
474 474 # Mapping of revision integer to full node.
475 475 self._compengine = b'zlib'
476 476 self._compengineopts = {}
477 477 self._maxdeltachainspan = -1
478 478 self._withsparseread = False
479 479 self._sparserevlog = False
480 480 self._srdensitythreshold = 0.50
481 481 self._srmingapsize = 262144
482 482
483 483 # Make copy of flag processors so each revlog instance can support
484 484 # custom flags.
485 485 self._flagprocessors = dict(flagutil.flagprocessors)
486 486
487 487 # 2-tuple of file handles being used for active writing.
488 488 self._writinghandles = None
489 489
490 490 self._loadindex()
491 491
492 492 self._concurrencychecker = concurrencychecker
493 493
494 494 def _loadindex(self):
495 495 mmapindexthreshold = None
496 496 opts = self.opener.options
497 497
498 498 if b'revlogv2' in opts:
499 499 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
500 500 elif b'revlogv1' in opts:
501 501 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
502 502 if b'generaldelta' in opts:
503 503 newversionflags |= FLAG_GENERALDELTA
504 504 elif b'revlogv0' in self.opener.options:
505 505 newversionflags = REVLOGV0
506 506 else:
507 507 newversionflags = REVLOG_DEFAULT_VERSION
508 508
509 509 if b'chunkcachesize' in opts:
510 510 self._chunkcachesize = opts[b'chunkcachesize']
511 511 if b'maxchainlen' in opts:
512 512 self._maxchainlen = opts[b'maxchainlen']
513 513 if b'deltabothparents' in opts:
514 514 self._deltabothparents = opts[b'deltabothparents']
515 515 self._lazydelta = bool(opts.get(b'lazydelta', True))
516 516 self._lazydeltabase = False
517 517 if self._lazydelta:
518 518 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
519 519 if b'compengine' in opts:
520 520 self._compengine = opts[b'compengine']
521 521 if b'zlib.level' in opts:
522 522 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
523 523 if b'zstd.level' in opts:
524 524 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
525 525 if b'maxdeltachainspan' in opts:
526 526 self._maxdeltachainspan = opts[b'maxdeltachainspan']
527 527 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
528 528 mmapindexthreshold = opts[b'mmapindexthreshold']
529 529 self.hassidedata = bool(opts.get(b'side-data', False))
530 530 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
531 531 withsparseread = bool(opts.get(b'with-sparse-read', False))
532 532 # sparse-revlog forces sparse-read
533 533 self._withsparseread = self._sparserevlog or withsparseread
534 534 if b'sparse-read-density-threshold' in opts:
535 535 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
536 536 if b'sparse-read-min-gap-size' in opts:
537 537 self._srmingapsize = opts[b'sparse-read-min-gap-size']
538 538 if opts.get(b'enableellipsis'):
539 539 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
540 540
541 541 # revlog v0 doesn't have flag processors
542 542 for flag, processor in pycompat.iteritems(
543 543 opts.get(b'flagprocessors', {})
544 544 ):
545 545 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
546 546
547 547 if self._chunkcachesize <= 0:
548 548 raise error.RevlogError(
549 549 _(b'revlog chunk cache size %r is not greater than 0')
550 550 % self._chunkcachesize
551 551 )
552 552 elif self._chunkcachesize & (self._chunkcachesize - 1):
553 553 raise error.RevlogError(
554 554 _(b'revlog chunk cache size %r is not a power of 2')
555 555 % self._chunkcachesize
556 556 )
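        # Aside: ``x & (x - 1)`` clears the lowest set bit, so the elif above
        # fires exactly when the cache size is not a power of two, e.g.
        # 65536 & 65535 == 0 (power of two) but 65537 & 65536 != 0.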
557 557
558 558 indexdata = b''
559 559 self._initempty = True
560 560 try:
561 561 with self._indexfp() as f:
562 562 if (
563 563 mmapindexthreshold is not None
564 564 and self.opener.fstat(f).st_size >= mmapindexthreshold
565 565 ):
566 566 # TODO: should .close() to release resources without
567 567 # relying on Python GC
568 568 indexdata = util.buffer(util.mmapread(f))
569 569 else:
570 570 indexdata = f.read()
571 571 if len(indexdata) > 0:
572 572 versionflags = INDEX_HEADER.unpack(indexdata[:4])[0]
573 573 self._initempty = False
574 574 else:
575 575 versionflags = newversionflags
576 576 except IOError as inst:
577 577 if inst.errno != errno.ENOENT:
578 578 raise
579 579
580 580 versionflags = newversionflags
581 581
582 582 self.version = versionflags
583 583
584 584 flags = versionflags & ~0xFFFF
585 585 fmt = versionflags & 0xFFFF
586 586
587 587 if fmt == REVLOGV0:
588 588 if flags:
589 589 raise error.RevlogError(
590 590 _(b'unknown flags (%#04x) in version %d revlog %s')
591 591 % (flags >> 16, fmt, self.indexfile)
592 592 )
593 593
594 594 self._inline = False
595 595 self._generaldelta = False
596 596
597 597 elif fmt == REVLOGV1:
598 598 if flags & ~REVLOGV1_FLAGS:
599 599 raise error.RevlogError(
600 600 _(b'unknown flags (%#04x) in version %d revlog %s')
601 601 % (flags >> 16, fmt, self.indexfile)
602 602 )
603 603
604 604 self._inline = versionflags & FLAG_INLINE_DATA
605 605 self._generaldelta = versionflags & FLAG_GENERALDELTA
606 606
607 607 elif fmt == REVLOGV2:
608 608 if flags & ~REVLOGV2_FLAGS:
609 609 raise error.RevlogError(
610 610 _(b'unknown flags (%#04x) in version %d revlog %s')
611 611 % (flags >> 16, fmt, self.indexfile)
612 612 )
613 613
614 614 # There is a bug in the transaction handling when going from an
615 615 # inline revlog to a separate index and data file. Turn it off until
616 616 # it's fixed, since v2 revlogs sometimes get rewritten on exchange.
617 617 # See issue6485
618 618 self._inline = False
619 619 # generaldelta implied by version 2 revlogs.
620 620 self._generaldelta = True
621 621
622 622 else:
623 623 raise error.RevlogError(
624 624 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
625 625 )
626 626
627 627 self.nodeconstants = sha1nodeconstants
628 628 self.nullid = self.nodeconstants.nullid
629 629
630 630 # sparse-revlog can't be on without general-delta (issue6056)
631 631 if not self._generaldelta:
632 632 self._sparserevlog = False
633 633
634 634 self._storedeltachains = True
635 635
636 636 devel_nodemap = (
637 637 self.nodemap_file
638 638 and opts.get(b'devel-force-nodemap', False)
639 639 and NodemapRevlogIO is not None
640 640 )
641 641
642 642 use_rust_index = False
643 643 if rustrevlog is not None:
644 644 if self.nodemap_file is not None:
645 645 use_rust_index = True
646 646 else:
647 647 use_rust_index = self.opener.options.get(b'rust.index')
648 648
649 649 self._io = revlogio()
650 650 if self.version == REVLOGV0:
651 651 self._io = revlogoldio()
652 652 elif fmt == REVLOGV2:
653 653 self._io = revlogv2io()
654 654 elif devel_nodemap:
655 655 self._io = NodemapRevlogIO()
656 656 elif use_rust_index:
657 657 self._io = rustrevlogio()
658 658 try:
659 659 d = self._io.parseindex(indexdata, self._inline)
660 660 index, _chunkcache = d
661 661 use_nodemap = (
662 662 not self._inline
663 663 and self.nodemap_file is not None
664 664 and util.safehasattr(index, 'update_nodemap_data')
665 665 )
666 666 if use_nodemap:
667 667 nodemap_data = nodemaputil.persisted_data(self)
668 668 if nodemap_data is not None:
669 669 docket = nodemap_data[0]
670 670 if (
671 671 len(d[0]) > docket.tip_rev
672 672 and d[0][docket.tip_rev][7] == docket.tip_node
673 673 ):
674 674 # no changelog tampering
675 675 self._nodemap_docket = docket
676 676 index.update_nodemap_data(*nodemap_data)
677 677 except (ValueError, IndexError):
678 678 raise error.RevlogError(
679 679 _(b"index %s is corrupted") % self.indexfile
680 680 )
681 681 self.index, self._chunkcache = d
682 682 if not self._chunkcache:
683 683 self._chunkclear()
684 684 # revnum -> (chain-length, sum-delta-length)
685 685 self._chaininfocache = util.lrucachedict(500)
686 686 # revlog header -> revlog compressor
687 687 self._decompressors = {}
688 688
689 689 @util.propertycache
690 690 def _compressor(self):
691 691 engine = util.compengines[self._compengine]
692 692 return engine.revlogcompressor(self._compengineopts)
693 693
694 694 def _indexfp(self, mode=b'r'):
695 695 """file object for the revlog's index file"""
696 696 args = {'mode': mode}
697 697 if mode != b'r':
698 698 args['checkambig'] = self._checkambig
699 699 if mode == b'w':
700 700 args['atomictemp'] = True
701 701 return self.opener(self.indexfile, **args)
702 702
703 703 def _datafp(self, mode=b'r'):
704 704 """file object for the revlog's data file"""
705 705 return self.opener(self.datafile, mode=mode)
706 706
707 707 @contextlib.contextmanager
708 708 def _datareadfp(self, existingfp=None):
709 709 """file object suitable to read data"""
710 710 # Use explicit file handle, if given.
711 711 if existingfp is not None:
712 712 yield existingfp
713 713
714 714 # Use a file handle being actively used for writes, if available.
715 715 # There is some danger to doing this because reads will seek the
716 716 # file. However, _writeentry() performs a SEEK_END before all writes,
717 717 # so we should be safe.
718 718 elif self._writinghandles:
719 719 if self._inline:
720 720 yield self._writinghandles[0]
721 721 else:
722 722 yield self._writinghandles[1]
723 723
724 724 # Otherwise open a new file handle.
725 725 else:
726 726 if self._inline:
727 727 func = self._indexfp
728 728 else:
729 729 func = self._datafp
730 730 with func() as fp:
731 731 yield fp
732 732
733 733 def tiprev(self):
734 734 return len(self.index) - 1
735 735
736 736 def tip(self):
737 737 return self.node(self.tiprev())
738 738
739 739 def __contains__(self, rev):
740 740 return 0 <= rev < len(self)
741 741
742 742 def __len__(self):
743 743 return len(self.index)
744 744
745 745 def __iter__(self):
746 746 return iter(pycompat.xrange(len(self)))
747 747
748 748 def revs(self, start=0, stop=None):
749 749 """iterate over all rev in this revlog (from start to stop)"""
750 750 return storageutil.iterrevs(len(self), start=start, stop=stop)
751 751
752 752 @property
753 753 def nodemap(self):
754 754 msg = (
755 755 b"revlog.nodemap is deprecated, "
756 756 b"use revlog.index.[has_node|rev|get_rev]"
757 757 )
758 758 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
759 759 return self.index.nodemap
760 760
761 761 @property
762 762 def _nodecache(self):
763 763 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
764 764 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
765 765 return self.index.nodemap
766 766
767 767 def hasnode(self, node):
768 768 try:
769 769 self.rev(node)
770 770 return True
771 771 except KeyError:
772 772 return False
773 773
774 774 def candelta(self, baserev, rev):
775 775 """whether two revisions (baserev, rev) can be delta-ed or not"""
776 776 # Disable delta if either rev requires a content-changing flag
777 777 # processor (ex. LFS). This is because such flag processor can alter
778 778 # the rawtext content that the delta will be based on, and two clients
779 779 # could have a same revlog node with different flags (i.e. different
780 780 # rawtext contents) and the delta could be incompatible.
781 781 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
782 782 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
783 783 ):
784 784 return False
785 785 return True
786 786
787 787 def update_caches(self, transaction):
788 788 if self.nodemap_file is not None:
789 789 if transaction is None:
790 790 nodemaputil.update_persistent_nodemap(self)
791 791 else:
792 792 nodemaputil.setup_persistent_nodemap(transaction, self)
793 793
794 794 def clearcaches(self):
795 795 self._revisioncache = None
796 796 self._chainbasecache.clear()
797 797 self._chunkcache = (0, b'')
798 798 self._pcache = {}
799 799 self._nodemap_docket = None
800 800 self.index.clearcaches()
801 801 # The python code is the one responsible for validating the docket, so we
802 802 # end up having to refresh it here.
803 803 use_nodemap = (
804 804 not self._inline
805 805 and self.nodemap_file is not None
806 806 and util.safehasattr(self.index, 'update_nodemap_data')
807 807 )
808 808 if use_nodemap:
809 809 nodemap_data = nodemaputil.persisted_data(self)
810 810 if nodemap_data is not None:
811 811 self._nodemap_docket = nodemap_data[0]
812 812 self.index.update_nodemap_data(*nodemap_data)
813 813
814 814 def rev(self, node):
815 815 try:
816 816 return self.index.rev(node)
817 817 except TypeError:
818 818 raise
819 819 except error.RevlogError:
820 820 # parsers.c radix tree lookup failed
821 821 if node == wdirid or node in wdirfilenodeids:
822 822 raise error.WdirUnsupported
823 823 raise error.LookupError(node, self.indexfile, _(b'no node'))
824 824
825 825 # Accessors for index entries.
826 826
827 827 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
828 828 # are flags.
829 829 def start(self, rev):
830 830 return int(self.index[rev][0] >> 16)
831 831
832 832 def flags(self, rev):
833 833 return self.index[rev][0] & 0xFFFF
834 834
835 835 def length(self, rev):
836 836 return self.index[rev][1]
837 837
838 838 def sidedata_length(self, rev):
839 839 if self.version & 0xFFFF != REVLOGV2:
840 840 return 0
841 841 return self.index[rev][9]
842 842
843 843 def rawsize(self, rev):
844 844 """return the length of the uncompressed text for a given revision"""
845 845 l = self.index[rev][2]
846 846 if l >= 0:
847 847 return l
848 848
849 849 t = self.rawdata(rev)
850 850 return len(t)
851 851
852 852 def size(self, rev):
853 853 """length of non-raw text (processed by a "read" flag processor)"""
854 854 # fast path: if no "read" flag processor could change the content,
855 855 # size is rawsize. note: ELLIPSIS is known to not change the content.
856 856 flags = self.flags(rev)
857 857 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
858 858 return self.rawsize(rev)
859 859
860 860 return len(self.revision(rev, raw=False))
861 861
862 862 def chainbase(self, rev):
863 863 base = self._chainbasecache.get(rev)
864 864 if base is not None:
865 865 return base
866 866
867 867 index = self.index
868 868 iterrev = rev
869 869 base = index[iterrev][3]
870 870 while base != iterrev:
871 871 iterrev = base
872 872 base = index[iterrev][3]
873 873
874 874 self._chainbasecache[rev] = base
875 875 return base
876 876
877 877 def linkrev(self, rev):
878 878 return self.index[rev][4]
879 879
880 880 def parentrevs(self, rev):
881 881 try:
882 882 entry = self.index[rev]
883 883 except IndexError:
884 884 if rev == wdirrev:
885 885 raise error.WdirUnsupported
886 886 raise
887 if entry[5] == nullrev:
888 return entry[6], entry[5]
889 else:
890 return entry[5], entry[6]
887
888 return entry[5], entry[6]
891 889
892 890 # fast parentrevs(rev) where rev isn't filtered
893 891 _uncheckedparentrevs = parentrevs
894 892
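
This hunk is the substance of the backout: the change being reverted (49fd21f32695, for issue6528) made parentrevs() swap its result whenever p1 was null, and per the commit message that behavior was implicated in repository corruption, so 5.8.1 stable goes back to returning the stored order verbatim (the parents() hunk just below drops the same swap in node space). A minimal before/after sketch, with hypothetical index values:

    nullrev = -1
    entry = [None] * 7
    entry[5], entry[6] = nullrev, 4   # p1 is null, p2 is rev 4

    # behavior being backed out: parents swapped when p1 is null
    backed_out = (entry[6], entry[5]) if entry[5] == nullrev else (entry[5], entry[6])
    # restored behavior: stored order preserved
    restored = (entry[5], entry[6])

    assert backed_out == (4, -1)
    assert restored == (-1, 4)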
895 893 def node(self, rev):
896 894 try:
897 895 return self.index[rev][7]
898 896 except IndexError:
899 897 if rev == wdirrev:
900 898 raise error.WdirUnsupported
901 899 raise
902 900
903 901 # Derived from index values.
904 902
905 903 def end(self, rev):
906 904 return self.start(rev) + self.length(rev)
907 905
908 906 def parents(self, node):
909 907 i = self.index
910 908 d = i[self.rev(node)]
911 # inline node() to avoid function call overhead
912 if d[5] == nullid:
913 return i[d[6]][7], i[d[5]][7]
914 else:
915 return i[d[5]][7], i[d[6]][7]
909 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
916 910
917 911 def chainlen(self, rev):
918 912 return self._chaininfo(rev)[0]
919 913
920 914 def _chaininfo(self, rev):
921 915 chaininfocache = self._chaininfocache
922 916 if rev in chaininfocache:
923 917 return chaininfocache[rev]
924 918 index = self.index
925 919 generaldelta = self._generaldelta
926 920 iterrev = rev
927 921 e = index[iterrev]
928 922 clen = 0
929 923 compresseddeltalen = 0
930 924 while iterrev != e[3]:
931 925 clen += 1
932 926 compresseddeltalen += e[1]
933 927 if generaldelta:
934 928 iterrev = e[3]
935 929 else:
936 930 iterrev -= 1
937 931 if iterrev in chaininfocache:
938 932 t = chaininfocache[iterrev]
939 933 clen += t[0]
940 934 compresseddeltalen += t[1]
941 935 break
942 936 e = index[iterrev]
943 937 else:
944 938 # Add text length of base since decompressing that also takes
945 939 # work. For cache hits the length is already included.
946 940 compresseddeltalen += e[1]
947 941 r = (clen, compresseddeltalen)
948 942 chaininfocache[rev] = r
949 943 return r
950 944
951 945 def _deltachain(self, rev, stoprev=None):
952 946 """Obtain the delta chain for a revision.
953 947
954 948 ``stoprev`` specifies a revision to stop at. If not specified, we
955 949 stop at the base of the chain.
956 950
957 951 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
958 952 revs in ascending order and ``stopped`` is a bool indicating whether
959 953 ``stoprev`` was hit.
960 954 """
961 955 # Try C implementation.
962 956 try:
963 957 return self.index.deltachain(rev, stoprev, self._generaldelta)
964 958 except AttributeError:
965 959 pass
966 960
967 961 chain = []
968 962
969 963 # Alias to prevent attribute lookup in tight loop.
970 964 index = self.index
971 965 generaldelta = self._generaldelta
972 966
973 967 iterrev = rev
974 968 e = index[iterrev]
975 969 while iterrev != e[3] and iterrev != stoprev:
976 970 chain.append(iterrev)
977 971 if generaldelta:
978 972 iterrev = e[3]
979 973 else:
980 974 iterrev -= 1
981 975 e = index[iterrev]
982 976
983 977 if iterrev == stoprev:
984 978 stopped = True
985 979 else:
986 980 chain.append(iterrev)
987 981 stopped = False
988 982
989 983 chain.reverse()
990 984 return chain, stopped
991 985
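
A toy illustration of the pure-Python walk above under generaldelta, where field 3 of each entry names the delta-base rev and a full snapshot is its own base (values hypothetical):

    bases = {0: 0, 1: 0, 2: 1, 3: 1}      # rev -> delta base

    def chain(rev):
        out = []
        while bases[rev] != rev:          # stop at the full snapshot
            out.append(rev)
            rev = bases[rev]
        out.append(rev)
        out.reverse()
        return out

    assert chain(3) == [0, 1, 3]          # snapshot 0, then deltas 1 and 3

Without generaldelta the walk simply decrements the rev, so the chain for rev 3 would be [0, 1, 2, 3].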
992 986 def ancestors(self, revs, stoprev=0, inclusive=False):
993 987 """Generate the ancestors of 'revs' in reverse revision order.
994 988 Does not generate revs lower than stoprev.
995 989
996 990 See the documentation for ancestor.lazyancestors for more details."""
997 991
998 992 # first, make sure start revisions aren't filtered
999 993 revs = list(revs)
1000 994 checkrev = self.node
1001 995 for r in revs:
1002 996 checkrev(r)
1003 997 # and we're sure ancestors aren't filtered as well
1004 998
1005 999 if rustancestor is not None:
1006 1000 lazyancestors = rustancestor.LazyAncestors
1007 1001 arg = self.index
1008 1002 else:
1009 1003 lazyancestors = ancestor.lazyancestors
1010 1004 arg = self._uncheckedparentrevs
1011 1005 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1012 1006
1013 1007 def descendants(self, revs):
1014 1008 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1015 1009
1016 1010 def findcommonmissing(self, common=None, heads=None):
1017 1011 """Return a tuple of the ancestors of common and the ancestors of heads
1018 1012 that are not ancestors of common. In revset terminology, we return the
1019 1013 tuple:
1020 1014
1021 1015 ::common, (::heads) - (::common)
1022 1016
1023 1017 The list is sorted by revision number, meaning it is
1024 1018 topologically sorted.
1025 1019
1026 1020 'heads' and 'common' are both lists of node IDs. If heads is
1027 1021 not supplied, uses all of the revlog's heads. If common is not
1028 1022 supplied, uses nullid."""
1029 1023 if common is None:
1030 1024 common = [nullid]
1031 1025 if heads is None:
1032 1026 heads = self.heads()
1033 1027
1034 1028 common = [self.rev(n) for n in common]
1035 1029 heads = [self.rev(n) for n in heads]
1036 1030
1037 1031 # we want the ancestors, but inclusive
1038 1032 class lazyset(object):
1039 1033 def __init__(self, lazyvalues):
1040 1034 self.addedvalues = set()
1041 1035 self.lazyvalues = lazyvalues
1042 1036
1043 1037 def __contains__(self, value):
1044 1038 return value in self.addedvalues or value in self.lazyvalues
1045 1039
1046 1040 def __iter__(self):
1047 1041 added = self.addedvalues
1048 1042 for r in added:
1049 1043 yield r
1050 1044 for r in self.lazyvalues:
1051 1045 if r not in added:
1052 1046 yield r
1053 1047
1054 1048 def add(self, value):
1055 1049 self.addedvalues.add(value)
1056 1050
1057 1051 def update(self, values):
1058 1052 self.addedvalues.update(values)
1059 1053
1060 1054 has = lazyset(self.ancestors(common))
1061 1055 has.add(nullrev)
1062 1056 has.update(common)
1063 1057
1064 1058 # take all ancestors from heads that aren't in has
1065 1059 missing = set()
1066 1060 visit = collections.deque(r for r in heads if r not in has)
1067 1061 while visit:
1068 1062 r = visit.popleft()
1069 1063 if r in missing:
1070 1064 continue
1071 1065 else:
1072 1066 missing.add(r)
1073 1067 for p in self.parentrevs(r):
1074 1068 if p not in has:
1075 1069 visit.append(p)
1076 1070 missing = list(missing)
1077 1071 missing.sort()
1078 1072 return has, [self.node(miss) for miss in missing]
1079 1073
1080 1074 def incrementalmissingrevs(self, common=None):
1081 1075 """Return an object that can be used to incrementally compute the
1082 1076 revision numbers of the ancestors of arbitrary sets that are not
1083 1077 ancestors of common. This is an ancestor.incrementalmissingancestors
1084 1078 object.
1085 1079
1086 1080 'common' is a list of revision numbers. If common is not supplied, uses
1087 1081 nullrev.
1088 1082 """
1089 1083 if common is None:
1090 1084 common = [nullrev]
1091 1085
1092 1086 if rustancestor is not None:
1093 1087 return rustancestor.MissingAncestors(self.index, common)
1094 1088 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1095 1089
1096 1090 def findmissingrevs(self, common=None, heads=None):
1097 1091 """Return the revision numbers of the ancestors of heads that
1098 1092 are not ancestors of common.
1099 1093
1100 1094 More specifically, return a list of revision numbers corresponding to
1101 1095 nodes N such that every N satisfies the following constraints:
1102 1096
1103 1097 1. N is an ancestor of some node in 'heads'
1104 1098 2. N is not an ancestor of any node in 'common'
1105 1099
1106 1100 The list is sorted by revision number, meaning it is
1107 1101 topologically sorted.
1108 1102
1109 1103 'heads' and 'common' are both lists of revision numbers. If heads is
1110 1104 not supplied, uses all of the revlog's heads. If common is not
1111 1105 supplied, uses nullid."""
1112 1106 if common is None:
1113 1107 common = [nullrev]
1114 1108 if heads is None:
1115 1109 heads = self.headrevs()
1116 1110
1117 1111 inc = self.incrementalmissingrevs(common=common)
1118 1112 return inc.missingancestors(heads)
1119 1113
1120 1114 def findmissing(self, common=None, heads=None):
1121 1115 """Return the ancestors of heads that are not ancestors of common.
1122 1116
1123 1117 More specifically, return a list of nodes N such that every N
1124 1118 satisfies the following constraints:
1125 1119
1126 1120 1. N is an ancestor of some node in 'heads'
1127 1121 2. N is not an ancestor of any node in 'common'
1128 1122
1129 1123 The list is sorted by revision number, meaning it is
1130 1124 topologically sorted.
1131 1125
1132 1126 'heads' and 'common' are both lists of node IDs. If heads is
1133 1127 not supplied, uses all of the revlog's heads. If common is not
1134 1128 supplied, uses nullid."""
1135 1129 if common is None:
1136 1130 common = [nullid]
1137 1131 if heads is None:
1138 1132 heads = self.heads()
1139 1133
1140 1134 common = [self.rev(n) for n in common]
1141 1135 heads = [self.rev(n) for n in heads]
1142 1136
1143 1137 inc = self.incrementalmissingrevs(common=common)
1144 1138 return [self.node(r) for r in inc.missingancestors(heads)]
1145 1139
1146 1140 def nodesbetween(self, roots=None, heads=None):
1147 1141 """Return a topological path from 'roots' to 'heads'.
1148 1142
1149 1143 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1150 1144 topologically sorted list of all nodes N that satisfy both of
1151 1145 these constraints:
1152 1146
1153 1147 1. N is a descendant of some node in 'roots'
1154 1148 2. N is an ancestor of some node in 'heads'
1155 1149
1156 1150 Every node is considered to be both a descendant and an ancestor
1157 1151 of itself, so every reachable node in 'roots' and 'heads' will be
1158 1152 included in 'nodes'.
1159 1153
1160 1154 'outroots' is the list of reachable nodes in 'roots', i.e., the
1161 1155 subset of 'roots' that is returned in 'nodes'. Likewise,
1162 1156 'outheads' is the subset of 'heads' that is also in 'nodes'.
1163 1157
1164 1158 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1165 1159 unspecified, uses nullid as the only root. If 'heads' is
1166 1160 unspecified, uses list of all of the revlog's heads."""
1167 1161 nonodes = ([], [], [])
1168 1162 if roots is not None:
1169 1163 roots = list(roots)
1170 1164 if not roots:
1171 1165 return nonodes
1172 1166 lowestrev = min([self.rev(n) for n in roots])
1173 1167 else:
1174 1168 roots = [nullid] # Everybody's a descendant of nullid
1175 1169 lowestrev = nullrev
1176 1170 if (lowestrev == nullrev) and (heads is None):
1177 1171 # We want _all_ the nodes!
1178 1172 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1179 1173 if heads is None:
1180 1174 # All nodes are ancestors, so the latest ancestor is the last
1181 1175 # node.
1182 1176 highestrev = len(self) - 1
1183 1177 # Set ancestors to None to signal that every node is an ancestor.
1184 1178 ancestors = None
1185 1179 # Set heads to an empty dictionary for later discovery of heads
1186 1180 heads = {}
1187 1181 else:
1188 1182 heads = list(heads)
1189 1183 if not heads:
1190 1184 return nonodes
1191 1185 ancestors = set()
1192 1186 # Turn heads into a dictionary so we can remove 'fake' heads.
1193 1187 # Also, later we will be using it to filter out the heads we can't
1194 1188 # find from roots.
1195 1189 heads = dict.fromkeys(heads, False)
1196 1190 # Start at the top and keep marking parents until we're done.
1197 1191 nodestotag = set(heads)
1198 1192 # Remember where the top was so we can use it as a limit later.
1199 1193 highestrev = max([self.rev(n) for n in nodestotag])
1200 1194 while nodestotag:
1201 1195 # grab a node to tag
1202 1196 n = nodestotag.pop()
1203 1197 # Never tag nullid
1204 1198 if n == nullid:
1205 1199 continue
1206 1200 # A node's revision number represents its place in a
1207 1201 # topologically sorted list of nodes.
1208 1202 r = self.rev(n)
1209 1203 if r >= lowestrev:
1210 1204 if n not in ancestors:
1211 1205 # If we are possibly a descendant of one of the roots
1212 1206 # and we haven't already been marked as an ancestor
1213 1207 ancestors.add(n) # Mark as ancestor
1214 1208 # Add non-nullid parents to list of nodes to tag.
1215 1209 nodestotag.update(
1216 1210 [p for p in self.parents(n) if p != nullid]
1217 1211 )
1218 1212 elif n in heads: # We've seen it before, is it a fake head?
1219 1213 # So it is, real heads should not be the ancestors of
1220 1214 # any other heads.
1221 1215 heads.pop(n)
1222 1216 if not ancestors:
1223 1217 return nonodes
1224 1218 # Now that we have our set of ancestors, we want to remove any
1225 1219 # roots that are not ancestors.
1226 1220
1227 1221 # If one of the roots was nullid, everything is included anyway.
1228 1222 if lowestrev > nullrev:
1229 1223 # But, since we weren't, let's recompute the lowest rev to not
1230 1224 # include roots that aren't ancestors.
1231 1225
1232 1226 # Filter out roots that aren't ancestors of heads
1233 1227 roots = [root for root in roots if root in ancestors]
1234 1228 # Recompute the lowest revision
1235 1229 if roots:
1236 1230 lowestrev = min([self.rev(root) for root in roots])
1237 1231 else:
1238 1232 # No more roots? Return empty list
1239 1233 return nonodes
1240 1234 else:
1241 1235 # We are descending from nullid, and don't need to care about
1242 1236 # any other roots.
1243 1237 lowestrev = nullrev
1244 1238 roots = [nullid]
1245 1239 # Transform our roots list into a set.
1246 1240 descendants = set(roots)
1247 1241 # Also, keep the original roots so we can filter out roots that aren't
1248 1242 # 'real' roots (i.e. are descended from other roots).
1249 1243 roots = descendants.copy()
1250 1244 # Our topologically sorted list of output nodes.
1251 1245 orderedout = []
1252 1246 # Don't start at nullid since we don't want nullid in our output list,
1253 1247 # and if nullid shows up in descendants, empty parents will look like
1254 1248 # they're descendants.
1255 1249 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1256 1250 n = self.node(r)
1257 1251 isdescendant = False
1258 1252 if lowestrev == nullrev: # Everybody is a descendant of nullid
1259 1253 isdescendant = True
1260 1254 elif n in descendants:
1261 1255 # n is already a descendant
1262 1256 isdescendant = True
1263 1257 # This check only needs to be done here because all the roots
1264 1258 # will start being marked as descendants before the loop.
1265 1259 if n in roots:
1266 1260 # If n was a root, check if it's a 'real' root.
1267 1261 p = tuple(self.parents(n))
1268 1262 # If any of its parents are descendants, it's not a root.
1269 1263 if (p[0] in descendants) or (p[1] in descendants):
1270 1264 roots.remove(n)
1271 1265 else:
1272 1266 p = tuple(self.parents(n))
1273 1267 # A node is a descendant if either of its parents are
1274 1268 # descendants. (We seeded the descendants set with the roots
1275 1269 # up there, remember?)
1276 1270 if (p[0] in descendants) or (p[1] in descendants):
1277 1271 descendants.add(n)
1278 1272 isdescendant = True
1279 1273 if isdescendant and ((ancestors is None) or (n in ancestors)):
1280 1274 # Only include nodes that are both descendants and ancestors.
1281 1275 orderedout.append(n)
1282 1276 if (ancestors is not None) and (n in heads):
1283 1277 # We're trying to figure out which heads are reachable
1284 1278 # from roots.
1285 1279 # Mark this head as having been reached
1286 1280 heads[n] = True
1287 1281 elif ancestors is None:
1288 1282 # Otherwise, we're trying to discover the heads.
1289 1283 # Assume this is a head because if it isn't, the next step
1290 1284 # will eventually remove it.
1291 1285 heads[n] = True
1292 1286 # But, obviously its parents aren't.
1293 1287 for p in self.parents(n):
1294 1288 heads.pop(p, None)
1295 1289 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1296 1290 roots = list(roots)
1297 1291 assert orderedout
1298 1292 assert roots
1299 1293 assert heads
1300 1294 return (orderedout, roots, heads)
1301 1295
1302 1296 def headrevs(self, revs=None):
1303 1297 if revs is None:
1304 1298 try:
1305 1299 return self.index.headrevs()
1306 1300 except AttributeError:
1307 1301 return self._headrevs()
1308 1302 if rustdagop is not None:
1309 1303 return rustdagop.headrevs(self.index, revs)
1310 1304 return dagop.headrevs(revs, self._uncheckedparentrevs)
1311 1305
1312 1306 def computephases(self, roots):
1313 1307 return self.index.computephasesmapsets(roots)
1314 1308
1315 1309 def _headrevs(self):
1316 1310 count = len(self)
1317 1311 if not count:
1318 1312 return [nullrev]
1319 1313 # we won't iter over filtered rev so nobody is a head at start
1320 1314 ishead = [0] * (count + 1)
1321 1315 index = self.index
1322 1316 for r in self:
1323 1317 ishead[r] = 1 # I may be a head
1324 1318 e = index[r]
1325 1319 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1326 1320 return [r for r, val in enumerate(ishead) if val]
1327 1321
1328 1322 def heads(self, start=None, stop=None):
1329 1323 """return the list of all nodes that have no children
1330 1324
1331 1325 if start is specified, only heads that are descendants of
1332 1326 start will be returned
1333 1327 if stop is specified, it will consider all the revs from stop
1334 1328 as if they had no children
1335 1329 """
1336 1330 if start is None and stop is None:
1337 1331 if not len(self):
1338 1332 return [nullid]
1339 1333 return [self.node(r) for r in self.headrevs()]
1340 1334
1341 1335 if start is None:
1342 1336 start = nullrev
1343 1337 else:
1344 1338 start = self.rev(start)
1345 1339
1346 1340 stoprevs = {self.rev(n) for n in stop or []}
1347 1341
1348 1342 revs = dagop.headrevssubset(
1349 1343 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1350 1344 )
1351 1345
1352 1346 return [self.node(rev) for rev in revs]
1353 1347
1354 1348 def children(self, node):
1355 1349 """find the children of a given node"""
1356 1350 c = []
1357 1351 p = self.rev(node)
1358 1352 for r in self.revs(start=p + 1):
1359 1353 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1360 1354 if prevs:
1361 1355 for pr in prevs:
1362 1356 if pr == p:
1363 1357 c.append(self.node(r))
1364 1358 elif p == nullrev:
1365 1359 c.append(self.node(r))
1366 1360 return c
1367 1361
1368 1362 def commonancestorsheads(self, a, b):
1369 1363 """calculate all the heads of the common ancestors of nodes a and b"""
1370 1364 a, b = self.rev(a), self.rev(b)
1371 1365 ancs = self._commonancestorsheads(a, b)
1372 1366 return pycompat.maplist(self.node, ancs)
1373 1367
1374 1368 def _commonancestorsheads(self, *revs):
1375 1369 """calculate all the heads of the common ancestors of revs"""
1376 1370 try:
1377 1371 ancs = self.index.commonancestorsheads(*revs)
1378 1372 except (AttributeError, OverflowError): # C implementation failed
1379 1373 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1380 1374 return ancs
1381 1375
1382 1376 def isancestor(self, a, b):
1383 1377 """return True if node a is an ancestor of node b
1384 1378
1385 1379 A revision is considered an ancestor of itself."""
1386 1380 a, b = self.rev(a), self.rev(b)
1387 1381 return self.isancestorrev(a, b)
1388 1382
1389 1383 def isancestorrev(self, a, b):
1390 1384 """return True if revision a is an ancestor of revision b
1391 1385
1392 1386 A revision is considered an ancestor of itself.
1393 1387
1394 1388 The implementation of this is trivial but the use of
1395 1389 reachableroots is not."""
1396 1390 if a == nullrev:
1397 1391 return True
1398 1392 elif a == b:
1399 1393 return True
1400 1394 elif a > b:
1401 1395 return False
1402 1396 return bool(self.reachableroots(a, [b], [a], includepath=False))
1403 1397
1404 1398 def reachableroots(self, minroot, heads, roots, includepath=False):
1405 1399 """return (heads(::(<roots> and <roots>::<heads>)))
1406 1400
1407 1401 If includepath is True, return (<roots>::<heads>)."""
1408 1402 try:
1409 1403 return self.index.reachableroots2(
1410 1404 minroot, heads, roots, includepath
1411 1405 )
1412 1406 except AttributeError:
1413 1407 return dagop._reachablerootspure(
1414 1408 self.parentrevs, minroot, roots, heads, includepath
1415 1409 )
1416 1410
1417 1411 def ancestor(self, a, b):
1418 1412 """calculate the "best" common ancestor of nodes a and b"""
1419 1413
1420 1414 a, b = self.rev(a), self.rev(b)
1421 1415 try:
1422 1416 ancs = self.index.ancestors(a, b)
1423 1417 except (AttributeError, OverflowError):
1424 1418 ancs = ancestor.ancestors(self.parentrevs, a, b)
1425 1419 if ancs:
1426 1420 # choose a consistent winner when there's a tie
1427 1421 return min(map(self.node, ancs))
1428 1422 return nullid
1429 1423
1430 1424 def _match(self, id):
1431 1425 if isinstance(id, int):
1432 1426 # rev
1433 1427 return self.node(id)
1434 1428 if len(id) == 20:
1435 1429 # possibly a binary node
1436 1430 # odds of a binary node being all hex in ASCII are 1 in 10**25
1437 1431 try:
1438 1432 node = id
1439 1433 self.rev(node) # quick search the index
1440 1434 return node
1441 1435 except error.LookupError:
1442 1436 pass # may be partial hex id
1443 1437 try:
1444 1438 # str(rev)
1445 1439 rev = int(id)
1446 1440 if b"%d" % rev != id:
1447 1441 raise ValueError
1448 1442 if rev < 0:
1449 1443 rev = len(self) + rev
1450 1444 if rev < 0 or rev >= len(self):
1451 1445 raise ValueError
1452 1446 return self.node(rev)
1453 1447 except (ValueError, OverflowError):
1454 1448 pass
1455 1449 if len(id) == 40:
1456 1450 try:
1457 1451 # a full hex nodeid?
1458 1452 node = bin(id)
1459 1453 self.rev(node)
1460 1454 return node
1461 1455 except (TypeError, error.LookupError):
1462 1456 pass
1463 1457
1464 1458 def _partialmatch(self, id):
1465 1459 # we don't care about wdirfilenodeids as they should always be full hashes
1466 1460 maybewdir = wdirhex.startswith(id)
1467 1461 try:
1468 1462 partial = self.index.partialmatch(id)
1469 1463 if partial and self.hasnode(partial):
1470 1464 if maybewdir:
1471 1465 # single 'ff...' match in radix tree, ambiguous with wdir
1472 1466 raise error.RevlogError
1473 1467 return partial
1474 1468 if maybewdir:
1475 1469 # no 'ff...' match in radix tree, wdir identified
1476 1470 raise error.WdirUnsupported
1477 1471 return None
1478 1472 except error.RevlogError:
1479 1473 # parsers.c radix tree lookup gave multiple matches
1480 1474 # fast path: for unfiltered changelog, radix tree is accurate
1481 1475 if not getattr(self, 'filteredrevs', None):
1482 1476 raise error.AmbiguousPrefixLookupError(
1483 1477 id, self.indexfile, _(b'ambiguous identifier')
1484 1478 )
1485 1479 # fall through to slow path that filters hidden revisions
1486 1480 except (AttributeError, ValueError):
1487 1481 # we are pure python, or key was too short to search radix tree
1488 1482 pass
1489 1483
1490 1484 if id in self._pcache:
1491 1485 return self._pcache[id]
1492 1486
1493 1487 if len(id) <= 40:
1494 1488 try:
1495 1489 # hex(node)[:...]
1496 1490 l = len(id) // 2 # grab an even number of digits
1497 1491 prefix = bin(id[: l * 2])
1498 1492 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1499 1493 nl = [
1500 1494 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1501 1495 ]
1502 1496 if nullhex.startswith(id):
1503 1497 nl.append(nullid)
1504 1498 if len(nl) > 0:
1505 1499 if len(nl) == 1 and not maybewdir:
1506 1500 self._pcache[id] = nl[0]
1507 1501 return nl[0]
1508 1502 raise error.AmbiguousPrefixLookupError(
1509 1503 id, self.indexfile, _(b'ambiguous identifier')
1510 1504 )
1511 1505 if maybewdir:
1512 1506 raise error.WdirUnsupported
1513 1507 return None
1514 1508 except TypeError:
1515 1509 pass
1516 1510
1517 1511 def lookup(self, id):
1518 1512 """locate a node based on:
1519 1513 - revision number or str(revision number)
1520 1514 - nodeid or subset of hex nodeid
1521 1515 """
1522 1516 n = self._match(id)
1523 1517 if n is not None:
1524 1518 return n
1525 1519 n = self._partialmatch(id)
1526 1520 if n:
1527 1521 return n
1528 1522
1529 1523 raise error.LookupError(id, self.indexfile, _(b'no match found'))
1530 1524
1531 1525 def shortest(self, node, minlength=1):
1532 1526 """Find the shortest unambiguous prefix that matches node."""
1533 1527
1534 1528 def isvalid(prefix):
1535 1529 try:
1536 1530 matchednode = self._partialmatch(prefix)
1537 1531 except error.AmbiguousPrefixLookupError:
1538 1532 return False
1539 1533 except error.WdirUnsupported:
1540 1534 # single 'ff...' match
1541 1535 return True
1542 1536 if matchednode is None:
1543 1537 raise error.LookupError(node, self.indexfile, _(b'no node'))
1544 1538 return True
1545 1539
1546 1540 def maybewdir(prefix):
1547 1541 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1548 1542
1549 1543 hexnode = hex(node)
1550 1544
1551 1545 def disambiguate(hexnode, minlength):
1552 1546 """Disambiguate against wdirid."""
1553 1547 for length in range(minlength, len(hexnode) + 1):
1554 1548 prefix = hexnode[:length]
1555 1549 if not maybewdir(prefix):
1556 1550 return prefix
1557 1551
1558 1552 if not getattr(self, 'filteredrevs', None):
1559 1553 try:
1560 1554 length = max(self.index.shortest(node), minlength)
1561 1555 return disambiguate(hexnode, length)
1562 1556 except error.RevlogError:
1563 1557 if node != wdirid:
1564 1558 raise error.LookupError(node, self.indexfile, _(b'no node'))
1565 1559 except AttributeError:
1566 1560 # Fall through to pure code
1567 1561 pass
1568 1562
1569 1563 if node == wdirid:
1570 1564 for length in range(minlength, len(hexnode) + 1):
1571 1565 prefix = hexnode[:length]
1572 1566 if isvalid(prefix):
1573 1567 return prefix
1574 1568
1575 1569 for length in range(minlength, len(hexnode) + 1):
1576 1570 prefix = hexnode[:length]
1577 1571 if isvalid(prefix):
1578 1572 return disambiguate(hexnode, length)
1579 1573
1580 1574 def cmp(self, node, text):
1581 1575 """compare text with a given file revision
1582 1576
1583 1577 returns True if text is different than what is stored.
1584 1578 """
1585 1579 p1, p2 = self.parents(node)
1586 1580 return storageutil.hashrevisionsha1(text, p1, p2) != node
1587 1581
1588 1582 def _cachesegment(self, offset, data):
1589 1583 """Add a segment to the revlog cache.
1590 1584
1591 1585 Accepts an absolute offset and the data that is at that location.
1592 1586 """
1593 1587 o, d = self._chunkcache
1594 1588 # try to add to existing cache
1595 1589 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1596 1590 self._chunkcache = o, d + data
1597 1591 else:
1598 1592 self._chunkcache = offset, data
1599 1593
1600 1594 def _readsegment(self, offset, length, df=None):
1601 1595 """Load a segment of raw data from the revlog.
1602 1596
1603 1597 Accepts an absolute offset, length to read, and an optional existing
1604 1598 file handle to read from.
1605 1599
1606 1600 If an existing file handle is passed, it will be seeked and the
1607 1601 original seek position will NOT be restored.
1608 1602
1609 1603 Returns a str or buffer of raw byte data.
1610 1604
1611 1605 Raises if the requested number of bytes could not be read.
1612 1606 """
1613 1607 # Cache data both forward and backward around the requested
1614 1608 # data, in a fixed size window. This helps speed up operations
1615 1609 # involving reading the revlog backwards.
1616 1610 cachesize = self._chunkcachesize
1617 1611 realoffset = offset & ~(cachesize - 1)
1618 1612 reallength = (
1619 1613 (offset + length + cachesize) & ~(cachesize - 1)
1620 1614 ) - realoffset
1621 1615 with self._datareadfp(df) as df:
1622 1616 df.seek(realoffset)
1623 1617 d = df.read(reallength)
1624 1618
1625 1619 self._cachesegment(realoffset, d)
1626 1620 if offset != realoffset or reallength != length:
1627 1621 startoffset = offset - realoffset
1628 1622 if len(d) - startoffset < length:
1629 1623 raise error.RevlogError(
1630 1624 _(
1631 1625 b'partial read of revlog %s; expected %d bytes from '
1632 1626 b'offset %d, got %d'
1633 1627 )
1634 1628 % (
1635 1629 self.indexfile if self._inline else self.datafile,
1636 1630 length,
1637 1631 realoffset,
1638 1632 len(d) - startoffset,
1639 1633 )
1640 1634 )
1641 1635
1642 1636 return util.buffer(d, startoffset, length)
1643 1637
1644 1638 if len(d) < length:
1645 1639 raise error.RevlogError(
1646 1640 _(
1647 1641 b'partial read of revlog %s; expected %d bytes from offset '
1648 1642 b'%d, got %d'
1649 1643 )
1650 1644 % (
1651 1645 self.indexfile if self._inline else self.datafile,
1652 1646 length,
1653 1647 offset,
1654 1648 len(d),
1655 1649 )
1656 1650 )
1657 1651
1658 1652 return d
1659 1653
1660 1654 def _getsegment(self, offset, length, df=None):
1661 1655 """Obtain a segment of raw data from the revlog.
1662 1656
1663 1657 Accepts an absolute offset, length of bytes to obtain, and an
1664 1658 optional file handle to the already-opened revlog. If the file
1665 1659 handle is used, its original seek position will not be preserved.
1666 1660
1667 1661 Requests for data may be returned from a cache.
1668 1662
1669 1663 Returns a str or a buffer instance of raw byte data.
1670 1664 """
1671 1665 o, d = self._chunkcache
1672 1666 l = len(d)
1673 1667
1674 1668 # is it in the cache?
1675 1669 cachestart = offset - o
1676 1670 cacheend = cachestart + length
1677 1671 if cachestart >= 0 and cacheend <= l:
1678 1672 if cachestart == 0 and cacheend == l:
1679 1673 return d # avoid a copy
1680 1674 return util.buffer(d, cachestart, cacheend - cachestart)
1681 1675
1682 1676 return self._readsegment(offset, length, df=df)
1683 1677
1684 1678 def _getsegmentforrevs(self, startrev, endrev, df=None):
1685 1679 """Obtain a segment of raw data corresponding to a range of revisions.
1686 1680
1687 1681 Accepts the start and end revisions and an optional already-open
1688 1682 file handle to be used for reading. If the file handle is used, its
1689 1683 seek position will not be preserved.
1690 1684
1691 1685 Requests for data may be satisfied by a cache.
1692 1686
1693 1687 Returns a 2-tuple of (offset, data) for the requested range of
1694 1688 revisions. Offset is the integer offset from the beginning of the
1695 1689 revlog and data is a str or buffer of the raw byte data.
1696 1690
1697 1691 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1698 1692 to determine where each revision's data begins and ends.
1699 1693 """
1700 1694 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1701 1695 # (functions are expensive).
1702 1696 index = self.index
1703 1697 istart = index[startrev]
1704 1698 start = int(istart[0] >> 16)
1705 1699 if startrev == endrev:
1706 1700 end = start + istart[1]
1707 1701 else:
1708 1702 iend = index[endrev]
1709 1703 end = int(iend[0] >> 16) + iend[1]
1710 1704
1711 1705 if self._inline:
1712 1706 start += (startrev + 1) * self.index.entry_size
1713 1707 end += (endrev + 1) * self.index.entry_size
1714 1708 length = end - start
1715 1709
1716 1710 return start, self._getsegment(start, length, df=df)
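# Hedged usage sketch (assumes `rl` is a revlog instance): a caller can carve
# one revision's compressed chunk out of the returned segment:
#
#   offset, data = rl._getsegmentforrevs(rev, rev)
#   chunkstart = rl.start(rev)
#   if rl._inline:
#       chunkstart += (rev + 1) * rl.index.entry_size
#   chunk = util.buffer(data, chunkstart - offset, rl.length(rev))
#
# This mirrors what the inner loop of _chunks() below does.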
1717 1711
1718 1712 def _chunk(self, rev, df=None):
1719 1713 """Obtain a single decompressed chunk for a revision.
1720 1714
1721 1715 Accepts an integer revision and an optional already-open file handle
1722 1716 to be used for reading. If used, the seek position of the file will not
1723 1717 be preserved.
1724 1718
1725 1719 Returns a str holding uncompressed data for the requested revision.
1726 1720 """
1727 1721 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1728 1722
1729 1723 def _chunks(self, revs, df=None, targetsize=None):
1730 1724 """Obtain decompressed chunks for the specified revisions.
1731 1725
1732 1726 Accepts an iterable of numeric revisions that are assumed to be in
1733 1727 ascending order. Also accepts an optional already-open file handle
1734 1728 to be used for reading. If used, the seek position of the file will
1735 1729 not be preserved.
1736 1730
1737 1731 This function is similar to calling ``self._chunk()`` multiple times,
1738 1732 but is faster.
1739 1733
1740 1734 Returns a list with decompressed data for each requested revision.
1741 1735 """
1742 1736 if not revs:
1743 1737 return []
1744 1738 start = self.start
1745 1739 length = self.length
1746 1740 inline = self._inline
1747 1741 iosize = self.index.entry_size
1748 1742 buffer = util.buffer
1749 1743
1750 1744 l = []
1751 1745 ladd = l.append
1752 1746
1753 1747 if not self._withsparseread:
1754 1748 slicedchunks = (revs,)
1755 1749 else:
1756 1750 slicedchunks = deltautil.slicechunk(
1757 1751 self, revs, targetsize=targetsize
1758 1752 )
1759 1753
1760 1754 for revschunk in slicedchunks:
1761 1755 firstrev = revschunk[0]
1762 1756 # Skip trailing revisions with empty diff
1763 1757 for lastrev in revschunk[::-1]:
1764 1758 if length(lastrev) != 0:
1765 1759 break
1766 1760
1767 1761 try:
1768 1762 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1769 1763 except OverflowError:
1770 1764 # issue4215 - we can't cache a run of chunks greater than
1771 1765 # 2G on Windows
1772 1766 return [self._chunk(rev, df=df) for rev in revschunk]
1773 1767
1774 1768 decomp = self.decompress
1775 1769 for rev in revschunk:
1776 1770 chunkstart = start(rev)
1777 1771 if inline:
1778 1772 chunkstart += (rev + 1) * iosize
1779 1773 chunklength = length(rev)
1780 1774 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1781 1775
1782 1776 return l
1783 1777
1784 1778 def _chunkclear(self):
1785 1779 """Clear the raw chunk cache."""
1786 1780 self._chunkcache = (0, b'')
1787 1781
1788 1782 def deltaparent(self, rev):
1789 1783 """return deltaparent of the given revision"""
1790 1784 base = self.index[rev][3]
1791 1785 if base == rev:
1792 1786 return nullrev
1793 1787 elif self._generaldelta:
1794 1788 return base
1795 1789 else:
1796 1790 return rev - 1
1797 1791
1798 1792 def issnapshot(self, rev):
1799 1793 """tells whether rev is a snapshot"""
1800 1794 if not self._sparserevlog:
1801 1795 return self.deltaparent(rev) == nullrev
1802 1796 elif util.safehasattr(self.index, b'issnapshot'):
1803 1797 # directly assign the method to cache the testing and access
1804 1798 self.issnapshot = self.index.issnapshot
1805 1799 return self.issnapshot(rev)
1806 1800 if rev == nullrev:
1807 1801 return True
1808 1802 entry = self.index[rev]
1809 1803 base = entry[3]
1810 1804 if base == rev:
1811 1805 return True
1812 1806 if base == nullrev:
1813 1807 return True
1814 1808 p1 = entry[5]
1815 1809 p2 = entry[6]
1816 1810 if base == p1 or base == p2:
1817 1811 return False
1818 1812 return self.issnapshot(base)
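# Hedged illustration (made-up revisions): in a sparse revlog, a delta whose
# base is one of its parents is a plain delta, not a snapshot:
#
#   r0  full snapshot          (base == nullrev)
#   r5  intermediate snapshot  (base == r0, and r0 is not a parent of r5)
#   r7  regular delta          (base == p1 of r7)
#
# issnapshot(r5) recurses into issnapshot(r0) and returns True, while
# issnapshot(r7) returns False at the parent check.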
1819 1813
1820 1814 def snapshotdepth(self, rev):
1821 1815 """number of snapshot in the chain before this one"""
1822 1816 if not self.issnapshot(rev):
1823 1817 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1824 1818 return len(self._deltachain(rev)[0]) - 1
1825 1819
1826 1820 def revdiff(self, rev1, rev2):
1827 1821 """return or calculate a delta between two revisions
1828 1822
1829 1823 The delta calculated is in binary form and is intended to be written to
1830 1824 revlog data directly. So this function needs raw revision data.
1831 1825 """
1832 1826 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1833 1827 return bytes(self._chunk(rev2))
1834 1828
1835 1829 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1836 1830
1837 1831 def _processflags(self, text, flags, operation, raw=False):
1838 1832 """deprecated entry point to access flag processors"""
1839 1833 msg = b'_processflags(...) use the specialized variant'
1840 1834 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1841 1835 if raw:
1842 1836 return text, flagutil.processflagsraw(self, text, flags)
1843 1837 elif operation == b'read':
1844 1838 return flagutil.processflagsread(self, text, flags)
1845 1839 else: # write operation
1846 1840 return flagutil.processflagswrite(self, text, flags)
1847 1841
1848 1842 def revision(self, nodeorrev, _df=None, raw=False):
1849 1843 """return an uncompressed revision of a given node or revision
1850 1844 number.
1851 1845
1852 1846 _df - an existing file handle to read from. (internal-only)
1853 1847 raw - an optional argument specifying if the revision data is to be
1854 1848 treated as raw data when applying flag transforms. 'raw' should be set
1855 1849 to True when generating changegroups or in debug commands.
1856 1850 """
1857 1851 if raw:
1858 1852 msg = (
1859 1853 b'revlog.revision(..., raw=True) is deprecated, '
1860 1854 b'use revlog.rawdata(...)'
1861 1855 )
1862 1856 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1863 1857 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1864 1858
1865 1859 def sidedata(self, nodeorrev, _df=None):
1866 1860 """a map of extra data related to the changeset but not part of the hash
1867 1861
1868 1862 This function currently returns a dictionary. However, a more advanced
1869 1863 mapping object will likely be used in the future for more
1870 1864 efficient/lazy code.
1871 1865 """
1872 1866 return self._revisiondata(nodeorrev, _df)[1]
1873 1867
1874 1868 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1875 1869 # deal with <nodeorrev> argument type
1876 1870 if isinstance(nodeorrev, int):
1877 1871 rev = nodeorrev
1878 1872 node = self.node(rev)
1879 1873 else:
1880 1874 node = nodeorrev
1881 1875 rev = None
1882 1876
1883 1877 # fast path the special `nullid` rev
1884 1878 if node == nullid:
1885 1879 return b"", {}
1886 1880
1887 1881 # ``rawtext`` is the text as stored inside the revlog. Might be the
1888 1882 # revision or might need to be processed to retrieve the revision.
1889 1883 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1890 1884
1891 1885 if self.version & 0xFFFF == REVLOGV2:
1892 1886 if rev is None:
1893 1887 rev = self.rev(node)
1894 1888 sidedata = self._sidedata(rev)
1895 1889 else:
1896 1890 sidedata = {}
1897 1891
1898 1892 if raw and validated:
1899 1893 # if we don't want to process the raw text and the raw
1900 1894 # text is cached, we can exit early.
1901 1895 return rawtext, sidedata
1902 1896 if rev is None:
1903 1897 rev = self.rev(node)
1904 1898 # the revlog's flags for this revision
1905 1899 # (these usually alter its state or content)
1906 1900 flags = self.flags(rev)
1907 1901
1908 1902 if validated and flags == REVIDX_DEFAULT_FLAGS:
1909 1903 # no extra flags set, no flag processor runs, text = rawtext
1910 1904 return rawtext, sidedata
1911 1905
1912 1906 if raw:
1913 1907 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1914 1908 text = rawtext
1915 1909 else:
1916 1910 r = flagutil.processflagsread(self, rawtext, flags)
1917 1911 text, validatehash = r
1918 1912 if validatehash:
1919 1913 self.checkhash(text, node, rev=rev)
1920 1914 if not validated:
1921 1915 self._revisioncache = (node, rev, rawtext)
1922 1916
1923 1917 return text, sidedata
1924 1918
1925 1919 def _rawtext(self, node, rev, _df=None):
1926 1920 """return the possibly unvalidated rawtext for a revision
1927 1921
1928 1922 returns (rev, rawtext, validated)
1929 1923 """
1930 1924
1931 1925 # revision in the cache (could be useful to apply delta)
1932 1926 cachedrev = None
1933 1927 # An intermediate text to apply deltas to
1934 1928 basetext = None
1935 1929
1936 1930 # Check if we have the entry in cache
1937 1931 # The cache entry looks like (node, rev, rawtext)
1938 1932 if self._revisioncache:
1939 1933 if self._revisioncache[0] == node:
1940 1934 return (rev, self._revisioncache[2], True)
1941 1935 cachedrev = self._revisioncache[1]
1942 1936
1943 1937 if rev is None:
1944 1938 rev = self.rev(node)
1945 1939
1946 1940 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1947 1941 if stopped:
1948 1942 basetext = self._revisioncache[2]
1949 1943
1950 1944 # drop cache to save memory, the caller is expected to
1951 1945 # update self._revisioncache after validating the text
1952 1946 self._revisioncache = None
1953 1947
1954 1948 targetsize = None
1955 1949 rawsize = self.index[rev][2]
1956 1950 if 0 <= rawsize:
1957 1951 targetsize = 4 * rawsize
1958 1952
1959 1953 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1960 1954 if basetext is None:
1961 1955 basetext = bytes(bins[0])
1962 1956 bins = bins[1:]
1963 1957
1964 1958 rawtext = mdiff.patches(basetext, bins)
1965 1959 del basetext # let us have a chance to free memory early
1966 1960 return (rev, rawtext, False)
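# Hedged illustration: for a delta chain [base, d1, d2] the code above is
# effectively
#
#   rawtext = mdiff.patches(bytes(bins[0]), [bins[1], bins[2]])
#
# and when the cached revision stopped the chain early, `bins` holds only the
# deltas to apply on top of the cached base text.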
1967 1961
1968 1962 def _sidedata(self, rev):
1969 1963 """Return the sidedata for a given revision number."""
1970 1964 index_entry = self.index[rev]
1971 1965 sidedata_offset = index_entry[8]
1972 1966 sidedata_size = index_entry[9]
1973 1967
1974 1968 if self._inline:
1975 1969 sidedata_offset += self.index.entry_size * (1 + rev)
1976 1970 if sidedata_size == 0:
1977 1971 return {}
1978 1972
1979 1973 segment = self._getsegment(sidedata_offset, sidedata_size)
1980 1974 sidedata = sidedatautil.deserialize_sidedata(segment)
1981 1975 return sidedata
1982 1976
1983 1977 def rawdata(self, nodeorrev, _df=None):
1984 1978 """return an uncompressed raw data of a given node or revision number.
1985 1979
1986 1980 _df - an existing file handle to read from. (internal-only)
1987 1981 """
1988 1982 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1989 1983
1990 1984 def hash(self, text, p1, p2):
1991 1985 """Compute a node hash.
1992 1986
1993 1987 Available as a function so that subclasses can replace the hash
1994 1988 as needed.
1995 1989 """
1996 1990 return storageutil.hashrevisionsha1(text, p1, p2)
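# For reference, a minimal sketch of the default SHA-1 node computation (the
# canonical code lives in storageutil.hashrevisionsha1; sketch only):
#
#   import hashlib
#   def sha1node(text, p1, p2):
#       s = hashlib.sha1(min(p1, p2))
#       s.update(max(p1, p2))
#       s.update(text)
#       return s.digest()
#
# i.e. the parent nodes are hashed in sorted order before the revision text.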
1997 1991
1998 1992 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1999 1993 """Check node hash integrity.
2000 1994
2001 1995 Available as a function so that subclasses can extend hash mismatch
2002 1996 behaviors as needed.
2003 1997 """
2004 1998 try:
2005 1999 if p1 is None and p2 is None:
2006 2000 p1, p2 = self.parents(node)
2007 2001 if node != self.hash(text, p1, p2):
2008 2002 # Clear the revision cache on hash failure. The revision cache
2009 2003 # only stores the raw revision and clearing the cache does have
2010 2004 # the side-effect that we won't have a cache hit when the raw
2011 2005 # revision data is accessed. But this case should be rare and
2012 2006 # it is extra work to teach the cache about the hash
2013 2007 # verification state.
2014 2008 if self._revisioncache and self._revisioncache[0] == node:
2015 2009 self._revisioncache = None
2016 2010
2017 2011 revornode = rev
2018 2012 if revornode is None:
2019 2013 revornode = templatefilters.short(hex(node))
2020 2014 raise error.RevlogError(
2021 2015 _(b"integrity check failed on %s:%s")
2022 2016 % (self.indexfile, pycompat.bytestr(revornode))
2023 2017 )
2024 2018 except error.RevlogError:
2025 2019 if self._censorable and storageutil.iscensoredtext(text):
2026 2020 raise error.CensoredNodeError(self.indexfile, node, text)
2027 2021 raise
2028 2022
2029 2023 def _enforceinlinesize(self, tr, fp=None):
2030 2024 """Check if the revlog is too big for inline and convert if so.
2031 2025
2032 2026 This should be called after revisions are added to the revlog. If the
2033 2027 revlog has grown too large to be an inline revlog, it will convert it
2034 2028 to use multiple index and data files.
2035 2029 """
2036 2030 tiprev = len(self) - 1
2037 2031 if (
2038 2032 not self._inline
2039 2033 or (self.start(tiprev) + self.length(tiprev)) < _maxinline
2040 2034 ):
2041 2035 return
2042 2036
2043 2037 troffset = tr.findoffset(self.indexfile)
2044 2038 if troffset is None:
2045 2039 raise error.RevlogError(
2046 2040 _(b"%s not found in the transaction") % self.indexfile
2047 2041 )
2048 2042 trindex = 0
2049 2043 tr.add(self.datafile, 0)
2050 2044
2051 2045 if fp:
2052 2046 fp.flush()
2053 2047 fp.close()
2054 2048 # We can't use the cached file handle after close(). So prevent
2055 2049 # its usage.
2056 2050 self._writinghandles = None
2057 2051
2058 2052 with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
2059 2053 for r in self:
2060 2054 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
2061 2055 if troffset <= self.start(r):
2062 2056 trindex = r
2063 2057
2064 2058 with self._indexfp(b'w') as fp:
2065 2059 self.version &= ~FLAG_INLINE_DATA
2066 2060 self._inline = False
2067 2061 io = self._io
2068 2062 for i in self:
2069 2063 e = io.packentry(self.index[i], self.node, self.version, i)
2070 2064 fp.write(e)
2071 2065
2072 2066 # the temp file replaces the real index when we exit the context
2073 2067 # manager
2074 2068
2075 2069 tr.replace(self.indexfile, trindex * self.index.entry_size)
2076 2070 nodemaputil.setup_persistent_nodemap(tr, self)
2077 2071 self._chunkclear()
2078 2072
2079 2073 def _nodeduplicatecallback(self, transaction, node):
2080 2074 """called when trying to add a node already stored."""
2081 2075
2082 2076 def addrevision(
2083 2077 self,
2084 2078 text,
2085 2079 transaction,
2086 2080 link,
2087 2081 p1,
2088 2082 p2,
2089 2083 cachedelta=None,
2090 2084 node=None,
2091 2085 flags=REVIDX_DEFAULT_FLAGS,
2092 2086 deltacomputer=None,
2093 2087 sidedata=None,
2094 2088 ):
2095 2089 """add a revision to the log
2096 2090
2097 2091 text - the revision data to add
2098 2092 transaction - the transaction object used for rollback
2099 2093 link - the linkrev data to add
2100 2094 p1, p2 - the parent nodeids of the revision
2101 2095 cachedelta - an optional precomputed delta
2102 2096 node - nodeid of revision; typically node is not specified, and it is
2103 2097 computed by default as hash(text, p1, p2); however, subclasses might
2104 2098 use a different hashing method (and override checkhash() in such a case)
2105 2099 flags - the known flags to set on the revision
2106 2100 deltacomputer - an optional deltacomputer instance shared between
2107 2101 multiple calls
2108 2102 """
2109 2103 if link == nullrev:
2110 2104 raise error.RevlogError(
2111 2105 _(b"attempted to add linkrev -1 to %s") % self.indexfile
2112 2106 )
2113 2107
2114 2108 if sidedata is None:
2115 2109 sidedata = {}
2116 2110 elif not self.hassidedata:
2117 2111 raise error.ProgrammingError(
2118 2112 _(b"trying to add sidedata to a revlog who don't support them")
2119 2113 )
2120 2114
2121 2115 if flags:
2122 2116 node = node or self.hash(text, p1, p2)
2123 2117
2124 2118 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2125 2119
2126 2120 # If the flag processor modifies the revision data, ignore any provided
2127 2121 # cachedelta.
2128 2122 if rawtext != text:
2129 2123 cachedelta = None
2130 2124
2131 2125 if len(rawtext) > _maxentrysize:
2132 2126 raise error.RevlogError(
2133 2127 _(
2134 2128 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2135 2129 )
2136 2130 % (self.indexfile, len(rawtext))
2137 2131 )
2138 2132
2139 2133 node = node or self.hash(rawtext, p1, p2)
2140 2134 rev = self.index.get_rev(node)
2141 2135 if rev is not None:
2142 2136 return rev
2143 2137
2144 2138 if validatehash:
2145 2139 self.checkhash(rawtext, node, p1=p1, p2=p2)
2146 2140
2147 2141 return self.addrawrevision(
2148 2142 rawtext,
2149 2143 transaction,
2150 2144 link,
2151 2145 p1,
2152 2146 p2,
2153 2147 node,
2154 2148 flags,
2155 2149 cachedelta=cachedelta,
2156 2150 deltacomputer=deltacomputer,
2157 2151 sidedata=sidedata,
2158 2152 )
2159 2153
2160 2154 def addrawrevision(
2161 2155 self,
2162 2156 rawtext,
2163 2157 transaction,
2164 2158 link,
2165 2159 p1,
2166 2160 p2,
2167 2161 node,
2168 2162 flags,
2169 2163 cachedelta=None,
2170 2164 deltacomputer=None,
2171 2165 sidedata=None,
2172 2166 ):
2173 2167 """add a raw revision with known flags, node and parents
2174 2168 useful when reusing a revision not stored in this revlog (ex: received
2175 2169 over wire, or read from an external bundle).
2176 2170 """
2177 2171 dfh = None
2178 2172 if not self._inline:
2179 2173 dfh = self._datafp(b"a+")
2180 2174 ifh = self._indexfp(b"a+")
2181 2175 try:
2182 2176 return self._addrevision(
2183 2177 node,
2184 2178 rawtext,
2185 2179 transaction,
2186 2180 link,
2187 2181 p1,
2188 2182 p2,
2189 2183 flags,
2190 2184 cachedelta,
2191 2185 ifh,
2192 2186 dfh,
2193 2187 deltacomputer=deltacomputer,
2194 2188 sidedata=sidedata,
2195 2189 )
2196 2190 finally:
2197 2191 if dfh:
2198 2192 dfh.close()
2199 2193 ifh.close()
2200 2194
2201 2195 def compress(self, data):
2202 2196 """Generate a possibly-compressed representation of data."""
2203 2197 if not data:
2204 2198 return b'', data
2205 2199
2206 2200 compressed = self._compressor.compress(data)
2207 2201
2208 2202 if compressed:
2209 2203 # The revlog compressor added the header in the returned data.
2210 2204 return b'', compressed
2211 2205
2212 2206 if data[0:1] == b'\0':
2213 2207 return b'', data
2214 2208 return b'u', data
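# Hedged round-trip sketch: decompress() below is the inverse of compress(),
# so for any revlog `rl` and chunk `data` one would expect
#
#   header, packed = rl.compress(data)
#   assert rl.decompress(header + packed) == data
#
# with header == b'u' marking literal storage, and b'' meaning the compressor
# already embedded its own header (e.g. b'x' for zlib) or the data begins
# with b'\0'.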
2215 2209
2216 2210 def decompress(self, data):
2217 2211 """Decompress a revlog chunk.
2218 2212
2219 2213 The chunk is expected to begin with a header identifying the
2220 2214 format type so it can be routed to an appropriate decompressor.
2221 2215 """
2222 2216 if not data:
2223 2217 return data
2224 2218
2225 2219 # Revlogs are read much more frequently than they are written and many
2226 2220 # chunks only take microseconds to decompress, so performance is
2227 2221 # important here.
2228 2222 #
2229 2223 # We can make a few assumptions about revlogs:
2230 2224 #
2231 2225 # 1) the majority of chunks will be compressed (as opposed to inline
2232 2226 # raw data).
2233 2227 # 2) decompressing *any* data will likely be at least 10x slower than
2234 2228 # returning raw inline data.
2235 2229 # 3) we want to prioritize common and officially supported compression
2236 2230 # engines
2237 2231 #
2238 2232 # It follows that we want to optimize for "decompress compressed data
2239 2233 # when encoded with common and officially supported compression engines"
2240 2234 # case over "raw data" and "data encoded by less common or non-official
2241 2235 # compression engines." That is why we have the inline lookup first
2242 2236 # followed by the compengines lookup.
2243 2237 #
2244 2238 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2245 2239 # compressed chunks. And this matters for changelog and manifest reads.
2246 2240 t = data[0:1]
2247 2241
2248 2242 if t == b'x':
2249 2243 try:
2250 2244 return _zlibdecompress(data)
2251 2245 except zlib.error as e:
2252 2246 raise error.RevlogError(
2253 2247 _(b'revlog decompress error: %s')
2254 2248 % stringutil.forcebytestr(e)
2255 2249 )
2256 2250 # '\0' is more common than 'u' so it goes first.
2257 2251 elif t == b'\0':
2258 2252 return data
2259 2253 elif t == b'u':
2260 2254 return util.buffer(data, 1)
2261 2255
2262 2256 try:
2263 2257 compressor = self._decompressors[t]
2264 2258 except KeyError:
2265 2259 try:
2266 2260 engine = util.compengines.forrevlogheader(t)
2267 2261 compressor = engine.revlogcompressor(self._compengineopts)
2268 2262 self._decompressors[t] = compressor
2269 2263 except KeyError:
2270 2264 raise error.RevlogError(
2271 2265 _(b'unknown compression type %s') % binascii.hexlify(t)
2272 2266 )
2273 2267
2274 2268 return compressor.decompress(data)
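# Summary of the chunk header bytes handled above (restating the code):
#
#   b'x'   -> zlib-compressed payload
#   b'\0'  -> uncompressed data stored as-is
#   b'u'   -> uncompressed data behind an explicit marker byte
#   other  -> resolved through util.compengines.forrevlogheader()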
2275 2269
2276 2270 def _addrevision(
2277 2271 self,
2278 2272 node,
2279 2273 rawtext,
2280 2274 transaction,
2281 2275 link,
2282 2276 p1,
2283 2277 p2,
2284 2278 flags,
2285 2279 cachedelta,
2286 2280 ifh,
2287 2281 dfh,
2288 2282 alwayscache=False,
2289 2283 deltacomputer=None,
2290 2284 sidedata=None,
2291 2285 ):
2292 2286 """internal function to add revisions to the log
2293 2287
2294 2288 see addrevision for argument descriptions.
2295 2289
2296 2290 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2297 2291
2298 2292 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2299 2293 be used.
2300 2294
2301 2295 invariants:
2302 2296 - rawtext is optional (can be None); if not set, cachedelta must be set.
2303 2297 if both are set, they must correspond to each other.
2304 2298 """
2305 2299 if node == nullid:
2306 2300 raise error.RevlogError(
2307 2301 _(b"%s: attempt to add null revision") % self.indexfile
2308 2302 )
2309 2303 if node == wdirid or node in wdirfilenodeids:
2310 2304 raise error.RevlogError(
2311 2305 _(b"%s: attempt to add wdir revision") % self.indexfile
2312 2306 )
2313 2307
2314 2308 if self._inline:
2315 2309 fh = ifh
2316 2310 else:
2317 2311 fh = dfh
2318 2312
2319 2313 btext = [rawtext]
2320 2314
2321 2315 curr = len(self)
2322 2316 prev = curr - 1
2323 2317
2324 2318 offset = self._get_data_offset(prev)
2325 2319
2326 2320 if self._concurrencychecker:
2327 2321 if self._inline:
2328 2322 # offset is "as if" it were in the .d file, so we need to add on
2329 2323 # the size of the entry metadata.
2330 2324 self._concurrencychecker(
2331 2325 ifh, self.indexfile, offset + curr * self.index.entry_size
2332 2326 )
2333 2327 else:
2334 2328 # Entries in the .i are a consistent size.
2335 2329 self._concurrencychecker(
2336 2330 ifh, self.indexfile, curr * self.index.entry_size
2337 2331 )
2338 2332 self._concurrencychecker(dfh, self.datafile, offset)
2339 2333
2340 2334 p1r, p2r = self.rev(p1), self.rev(p2)
2341 2335
2342 2336 # full versions are inserted when the needed deltas
2343 2337 # become comparable to the uncompressed text
2344 2338 if rawtext is None:
2345 2339 # need rawtext size, before changed by flag processors, which is
2346 2340 # the non-raw size. use revlog explicitly to avoid filelog's extra
2347 2341 # logic that might remove metadata size.
2348 2342 textlen = mdiff.patchedsize(
2349 2343 revlog.size(self, cachedelta[0]), cachedelta[1]
2350 2344 )
2351 2345 else:
2352 2346 textlen = len(rawtext)
2353 2347
2354 2348 if deltacomputer is None:
2355 2349 deltacomputer = deltautil.deltacomputer(self)
2356 2350
2357 2351 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2358 2352
2359 2353 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2360 2354
2361 2355 if sidedata:
2362 2356 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2363 2357 sidedata_offset = offset + deltainfo.deltalen
2364 2358 else:
2365 2359 serialized_sidedata = b""
2366 2360 # Don't store the offset if the sidedata is empty; that way
2367 2361 # we can easily detect empty sidedata, and it will be no different
2368 2362 # from sidedata we add manually.
2369 2363 sidedata_offset = 0
2370 2364
2371 2365 e = (
2372 2366 offset_type(offset, flags),
2373 2367 deltainfo.deltalen,
2374 2368 textlen,
2375 2369 deltainfo.base,
2376 2370 link,
2377 2371 p1r,
2378 2372 p2r,
2379 2373 node,
2380 2374 sidedata_offset,
2381 2375 len(serialized_sidedata),
2382 2376 )
2383 2377
2384 2378 if self.version & 0xFFFF != REVLOGV2:
2385 2379 e = e[:8]
2386 2380
2387 2381 self.index.append(e)
2388 2382 entry = self._io.packentry(e, self.node, self.version, curr)
2389 2383 self._writeentry(
2390 2384 transaction,
2391 2385 ifh,
2392 2386 dfh,
2393 2387 entry,
2394 2388 deltainfo.data,
2395 2389 link,
2396 2390 offset,
2397 2391 serialized_sidedata,
2398 2392 )
2399 2393
2400 2394 rawtext = btext[0]
2401 2395
2402 2396 if alwayscache and rawtext is None:
2403 2397 rawtext = deltacomputer.buildtext(revinfo, fh)
2404 2398
2405 2399 if type(rawtext) == bytes: # only accept immutable objects
2406 2400 self._revisioncache = (node, curr, rawtext)
2407 2401 self._chainbasecache[curr] = deltainfo.chainbase
2408 2402 return curr
2409 2403
2410 2404 def _get_data_offset(self, prev):
2411 2405 """Returns the current offset in the (in-transaction) data file.
2412 2406 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2413 2407 file to store that information: since sidedata can be rewritten to the
2414 2408 end of the data file within a transaction, you can have cases where, for
2415 2409 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2416 2410 to `n - 1`'s sidedata being written after `n`'s data.
2417 2411
2418 2412 TODO cache this in a docket file before getting out of experimental."""
2419 2413 if self.version & 0xFFFF != REVLOGV2:
2420 2414 return self.end(prev)
2421 2415
2422 2416 offset = 0
2423 2417 for rev, entry in enumerate(self.index):
2424 2418 sidedata_end = entry[8] + entry[9]
2425 2419 # Sidedata for a previous rev has potentially been written after
2426 2420 # this rev's end, so take the max.
2427 2421 offset = max(self.end(rev), offset, sidedata_end)
2428 2422 return offset
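# Hedged example (illustrative offsets): if rev 0's sidedata was rewritten
# after rev 1 was appended, the index might record
#
#   rev 0: data ends at 100, sidedata offset 300 + size 50 -> 350
#   rev 1: data ends at 300, sidedata offset 120 + size 30 -> 150
#
# and the loop returns max(100, 350, 300, 150) = 350, so new writes land
# past every byte the transaction has already produced.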
2429 2423
2430 2424 def _writeentry(
2431 2425 self, transaction, ifh, dfh, entry, data, link, offset, sidedata
2432 2426 ):
2433 2427 # Files opened in a+ mode have inconsistent behavior on various
2434 2428 # platforms. Windows requires that a file positioning call be made
2435 2429 # when the file handle transitions between reads and writes. See
2436 2430 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2437 2431 # platforms, Python or the platform itself can be buggy. Some versions
2438 2432 # of Solaris have been observed to not append at the end of the file
2439 2433 # if the file was seeked to before the end. See issue4943 for more.
2440 2434 #
2441 2435 # We work around this issue by inserting a seek() before writing.
2442 2436 # Note: This is likely not necessary on Python 3. However, because
2443 2437 # the file handle is reused for reads and may be seeked there, we need
2444 2438 # to be careful before changing this.
2445 2439 ifh.seek(0, os.SEEK_END)
2446 2440 if dfh:
2447 2441 dfh.seek(0, os.SEEK_END)
2448 2442
2449 2443 curr = len(self) - 1
2450 2444 if not self._inline:
2451 2445 transaction.add(self.datafile, offset)
2452 2446 transaction.add(self.indexfile, curr * len(entry))
2453 2447 if data[0]:
2454 2448 dfh.write(data[0])
2455 2449 dfh.write(data[1])
2456 2450 if sidedata:
2457 2451 dfh.write(sidedata)
2458 2452 ifh.write(entry)
2459 2453 else:
2460 2454 offset += curr * self.index.entry_size
2461 2455 transaction.add(self.indexfile, offset)
2462 2456 ifh.write(entry)
2463 2457 ifh.write(data[0])
2464 2458 ifh.write(data[1])
2465 2459 if sidedata:
2466 2460 ifh.write(sidedata)
2467 2461 self._enforceinlinesize(transaction, ifh)
2468 2462 nodemaputil.setup_persistent_nodemap(transaction, self)
2469 2463
2470 2464 def addgroup(
2471 2465 self,
2472 2466 deltas,
2473 2467 linkmapper,
2474 2468 transaction,
2475 2469 alwayscache=False,
2476 2470 addrevisioncb=None,
2477 2471 duplicaterevisioncb=None,
2478 2472 ):
2479 2473 """
2480 2474 add a delta group
2481 2475
2482 2476 given a set of deltas, add them to the revision log. the
2483 2477 first delta is against its parent, which should be in our
2484 2478 log, the rest are against the previous delta.
2485 2479
2486 2480 If ``addrevisioncb`` is defined, it will be called with arguments of
2487 2481 this revlog and the node that was added.
2488 2482 """
2489 2483
2490 2484 if self._writinghandles:
2491 2485 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2492 2486
2493 2487 r = len(self)
2494 2488 end = 0
2495 2489 if r:
2496 2490 end = self.end(r - 1)
2497 2491 ifh = self._indexfp(b"a+")
2498 2492 isize = r * self.index.entry_size
2499 2493 if self._inline:
2500 2494 transaction.add(self.indexfile, end + isize)
2501 2495 dfh = None
2502 2496 else:
2503 2497 transaction.add(self.indexfile, isize)
2504 2498 transaction.add(self.datafile, end)
2505 2499 dfh = self._datafp(b"a+")
2506 2500
2507 2501 def flush():
2508 2502 if dfh:
2509 2503 dfh.flush()
2510 2504 ifh.flush()
2511 2505
2512 2506 self._writinghandles = (ifh, dfh)
2513 2507 empty = True
2514 2508
2515 2509 try:
2516 2510 deltacomputer = deltautil.deltacomputer(self)
2517 2511 # loop through our set of deltas
2518 2512 for data in deltas:
2519 2513 node, p1, p2, linknode, deltabase, delta, flags, sidedata = data
2520 2514 link = linkmapper(linknode)
2521 2515 flags = flags or REVIDX_DEFAULT_FLAGS
2522 2516
2523 2517 rev = self.index.get_rev(node)
2524 2518 if rev is not None:
2525 2519 # this can happen if two branches make the same change
2526 2520 self._nodeduplicatecallback(transaction, rev)
2527 2521 if duplicaterevisioncb:
2528 2522 duplicaterevisioncb(self, rev)
2529 2523 empty = False
2530 2524 continue
2531 2525
2532 2526 for p in (p1, p2):
2533 2527 if not self.index.has_node(p):
2534 2528 raise error.LookupError(
2535 2529 p, self.indexfile, _(b'unknown parent')
2536 2530 )
2537 2531
2538 2532 if not self.index.has_node(deltabase):
2539 2533 raise error.LookupError(
2540 2534 deltabase, self.indexfile, _(b'unknown delta base')
2541 2535 )
2542 2536
2543 2537 baserev = self.rev(deltabase)
2544 2538
2545 2539 if baserev != nullrev and self.iscensored(baserev):
2546 2540 # if base is censored, the delta must be a full replacement in a
2547 2541 # single patch operation
2548 2542 hlen = struct.calcsize(b">lll")
2549 2543 oldlen = self.rawsize(baserev)
2550 2544 newlen = len(delta) - hlen
2551 2545 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2552 2546 raise error.CensoredBaseError(
2553 2547 self.indexfile, self.node(baserev)
2554 2548 )
2555 2549
2556 2550 if not flags and self._peek_iscensored(baserev, delta, flush):
2557 2551 flags |= REVIDX_ISCENSORED
2558 2552
2559 2553 # We assume consumers of addrevisioncb will want to retrieve
2560 2554 # the added revision, which will require a call to
2561 2555 # revision(). revision() will fast path if there is a cache
2562 2556 # hit. So, we tell _addrevision() to always cache in this case.
2563 2557 # We're only using addgroup() in the context of changegroup
2564 2558 # generation so the revision data can always be handled as raw
2565 2559 # by the flagprocessor.
2566 2560 rev = self._addrevision(
2567 2561 node,
2568 2562 None,
2569 2563 transaction,
2570 2564 link,
2571 2565 p1,
2572 2566 p2,
2573 2567 flags,
2574 2568 (baserev, delta),
2575 2569 ifh,
2576 2570 dfh,
2577 2571 alwayscache=alwayscache,
2578 2572 deltacomputer=deltacomputer,
2579 2573 sidedata=sidedata,
2580 2574 )
2581 2575
2582 2576 if addrevisioncb:
2583 2577 addrevisioncb(self, rev)
2584 2578 empty = False
2585 2579
2586 2580 if not dfh and not self._inline:
2587 2581 # addrevision switched from inline to conventional
2588 2582 # reopen the index
2589 2583 ifh.close()
2590 2584 dfh = self._datafp(b"a+")
2591 2585 ifh = self._indexfp(b"a+")
2592 2586 self._writinghandles = (ifh, dfh)
2593 2587 finally:
2594 2588 self._writinghandles = None
2595 2589
2596 2590 if dfh:
2597 2591 dfh.close()
2598 2592 ifh.close()
2599 2593 return not empty
2600 2594
2601 2595 def iscensored(self, rev):
2602 2596 """Check if a file revision is censored."""
2603 2597 if not self._censorable:
2604 2598 return False
2605 2599
2606 2600 return self.flags(rev) & REVIDX_ISCENSORED
2607 2601
2608 2602 def _peek_iscensored(self, baserev, delta, flush):
2609 2603 """Quickly check if a delta produces a censored revision."""
2610 2604 if not self._censorable:
2611 2605 return False
2612 2606
2613 2607 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2614 2608
2615 2609 def getstrippoint(self, minlink):
2616 2610 """find the minimum rev that must be stripped to strip the linkrev
2617 2611
2618 2612 Returns a tuple containing the minimum rev and a set of all revs that
2619 2613 have linkrevs that will be broken by this strip.
2620 2614 """
2621 2615 return storageutil.resolvestripinfo(
2622 2616 minlink,
2623 2617 len(self) - 1,
2624 2618 self.headrevs(),
2625 2619 self.linkrev,
2626 2620 self.parentrevs,
2627 2621 )
2628 2622
2629 2623 def strip(self, minlink, transaction):
2630 2624 """truncate the revlog on the first revision with a linkrev >= minlink
2631 2625
2632 2626 This function is called when we're stripping revision minlink and
2633 2627 its descendants from the repository.
2634 2628
2635 2629 We have to remove all revisions with linkrev >= minlink, because
2636 2630 the equivalent changelog revisions will be renumbered after the
2637 2631 strip.
2638 2632
2639 2633 So we truncate the revlog on the first of these revisions, and
2640 2634 trust that the caller has saved the revisions that shouldn't be
2641 2635 removed and that it'll re-add them after this truncation.
2642 2636 """
2643 2637 if len(self) == 0:
2644 2638 return
2645 2639
2646 2640 rev, _ = self.getstrippoint(minlink)
2647 2641 if rev == len(self):
2648 2642 return
2649 2643
2650 2644 # first truncate the files on disk
2651 2645 end = self.start(rev)
2652 2646 if not self._inline:
2653 2647 transaction.add(self.datafile, end)
2654 2648 end = rev * self.index.entry_size
2655 2649 else:
2656 2650 end += rev * self.index.entry_size
2657 2651
2658 2652 transaction.add(self.indexfile, end)
2659 2653
2660 2654 # then reset internal state in memory to forget those revisions
2661 2655 self._revisioncache = None
2662 2656 self._chaininfocache = util.lrucachedict(500)
2663 2657 self._chunkclear()
2664 2658
2665 2659 del self.index[rev:-1]
2666 2660
2667 2661 def checksize(self):
2668 2662 """Check size of index and data files
2669 2663
2670 2664 return a (dd, di) tuple.
2671 2665 - dd: extra bytes for the "data" file
2672 2666 - di: extra bytes for the "index" file
2673 2667
2674 2668 A healthy revlog will return (0, 0).
2675 2669 """
2676 2670 expected = 0
2677 2671 if len(self):
2678 2672 expected = max(0, self.end(len(self) - 1))
2679 2673
2680 2674 try:
2681 2675 with self._datafp() as f:
2682 2676 f.seek(0, io.SEEK_END)
2683 2677 actual = f.tell()
2684 2678 dd = actual - expected
2685 2679 except IOError as inst:
2686 2680 if inst.errno != errno.ENOENT:
2687 2681 raise
2688 2682 dd = 0
2689 2683
2690 2684 try:
2691 2685 f = self.opener(self.indexfile)
2692 2686 f.seek(0, io.SEEK_END)
2693 2687 actual = f.tell()
2694 2688 f.close()
2695 2689 s = self.index.entry_size
2696 2690 i = max(0, actual // s)
2697 2691 di = actual - (i * s)
2698 2692 if self._inline:
2699 2693 databytes = 0
2700 2694 for r in self:
2701 2695 databytes += max(0, self.length(r))
2702 2696 dd = 0
2703 2697 di = actual - len(self) * s - databytes
2704 2698 except IOError as inst:
2705 2699 if inst.errno != errno.ENOENT:
2706 2700 raise
2707 2701 di = 0
2708 2702
2709 2703 return (dd, di)
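# Hedged interpretation sketch: a caller such as a verify command would use
#
#   dd, di = rl.checksize()
#   if dd:
#       pass  # the data file has `dd` unexpected trailing bytes
#   if di:
#       pass  # the index file has `di` unexpected trailing bytes
#
# where (0, 0) is the healthy result documented above.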
2710 2704
2711 2705 def files(self):
2712 2706 res = [self.indexfile]
2713 2707 if not self._inline:
2714 2708 res.append(self.datafile)
2715 2709 return res
2716 2710
2717 2711 def emitrevisions(
2718 2712 self,
2719 2713 nodes,
2720 2714 nodesorder=None,
2721 2715 revisiondata=False,
2722 2716 assumehaveparentrevisions=False,
2723 2717 deltamode=repository.CG_DELTAMODE_STD,
2724 2718 sidedata_helpers=None,
2725 2719 ):
2726 2720 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2727 2721 raise error.ProgrammingError(
2728 2722 b'unhandled value for nodesorder: %s' % nodesorder
2729 2723 )
2730 2724
2731 2725 if nodesorder is None and not self._generaldelta:
2732 2726 nodesorder = b'storage'
2733 2727
2734 2728 if (
2735 2729 not self._storedeltachains
2736 2730 and deltamode != repository.CG_DELTAMODE_PREV
2737 2731 ):
2738 2732 deltamode = repository.CG_DELTAMODE_FULL
2739 2733
2740 2734 return storageutil.emitrevisions(
2741 2735 self,
2742 2736 nodes,
2743 2737 nodesorder,
2744 2738 revlogrevisiondelta,
2745 2739 deltaparentfn=self.deltaparent,
2746 2740 candeltafn=self.candelta,
2747 2741 rawsizefn=self.rawsize,
2748 2742 revdifffn=self.revdiff,
2749 2743 flagsfn=self.flags,
2750 2744 deltamode=deltamode,
2751 2745 revisiondata=revisiondata,
2752 2746 assumehaveparentrevisions=assumehaveparentrevisions,
2753 2747 sidedata_helpers=sidedata_helpers,
2754 2748 )
2755 2749
2756 2750 DELTAREUSEALWAYS = b'always'
2757 2751 DELTAREUSESAMEREVS = b'samerevs'
2758 2752 DELTAREUSENEVER = b'never'
2759 2753
2760 2754 DELTAREUSEFULLADD = b'fulladd'
2761 2755
2762 2756 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2763 2757
2764 2758 def clone(
2765 2759 self,
2766 2760 tr,
2767 2761 destrevlog,
2768 2762 addrevisioncb=None,
2769 2763 deltareuse=DELTAREUSESAMEREVS,
2770 2764 forcedeltabothparents=None,
2771 2765 sidedatacompanion=None,
2772 2766 ):
2773 2767 """Copy this revlog to another, possibly with format changes.
2774 2768
2775 2769 The destination revlog will contain the same revisions and nodes.
2776 2770 However, it may not be bit-for-bit identical due to e.g. delta encoding
2777 2771 differences.
2778 2772
2779 2773 The ``deltareuse`` argument controls how deltas from the existing revlog
2780 2774 are preserved in the destination revlog. The argument can have the
2781 2775 following values:
2782 2776
2783 2777 DELTAREUSEALWAYS
2784 2778 Deltas will always be reused (if possible), even if the destination
2785 2779 revlog would not select the same revisions for the delta. This is the
2786 2780 fastest mode of operation.
2787 2781 DELTAREUSESAMEREVS
2788 2782 Deltas will be reused if the destination revlog would pick the same
2789 2783 revisions for the delta. This mode strikes a balance between speed
2790 2784 and optimization.
2791 2785 DELTAREUSENEVER
2792 2786 Deltas will never be reused. This is the slowest mode of execution.
2793 2787 This mode can be used to recompute deltas (e.g. if the diff/delta
2794 2788 algorithm changes).
2795 2789 DELTAREUSEFULLADD
2796 2790 Revisions will be re-added as if they were new content. This is
2797 2791 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2798 2792 e.g. large file detection and handling.
2799 2793
2800 2794 Delta computation can be slow, so the choice of delta reuse policy can
2801 2795 significantly affect run time.
2802 2796
2803 2797 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2804 2798 two extremes. Deltas will be reused if they are appropriate. But if the
2805 2799 delta could choose a better revision, it will do so. This means if you
2806 2800 are converting a non-generaldelta revlog to a generaldelta revlog,
2807 2801 deltas will be recomputed if the delta's parent isn't a parent of the
2808 2802 revision.
2809 2803
2810 2804 In addition to the delta policy, the ``forcedeltabothparents``
2811 2805 argument controls whether to force computing deltas against both parents
2812 2806 for merges. If None, the destination revlog's current setting is kept.
2813 2807
2814 2808 If not None, the `sidedatacompanion` is a callable that accepts two
2815 2809 arguments:
2816 2810
2817 2811 (srcrevlog, rev)
2818 2812
2819 2813 and returns a quintet that controls changes to sidedata content from the
2820 2814 old revision to the new clone result:
2821 2815
2822 2816 (dropall, filterout, update, new_flags, dropped_flags)
2823 2817
2824 2818 * if `dropall` is True, all sidedata should be dropped
2825 2819 * `filterout` is a set of sidedata keys that should be dropped
2826 2820 * `update` is a mapping of additional/new key -> value
2827 2821 * `new_flags` is a bitfield of new flags that the revision should get
2828 2822 * `dropped_flags` is a bitfield of flags that the revision should no longer have
2829 2823 """
2830 2824 if deltareuse not in self.DELTAREUSEALL:
2831 2825 raise ValueError(
2832 2826 _(b'value for deltareuse invalid: %s') % deltareuse
2833 2827 )
2834 2828
2835 2829 if len(destrevlog):
2836 2830 raise ValueError(_(b'destination revlog is not empty'))
2837 2831
2838 2832 if getattr(self, 'filteredrevs', None):
2839 2833 raise ValueError(_(b'source revlog has filtered revisions'))
2840 2834 if getattr(destrevlog, 'filteredrevs', None):
2841 2835 raise ValueError(_(b'destination revlog has filtered revisions'))
2842 2836
2843 2837 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2844 2838 # if possible.
2845 2839 oldlazydelta = destrevlog._lazydelta
2846 2840 oldlazydeltabase = destrevlog._lazydeltabase
2847 2841 oldamd = destrevlog._deltabothparents
2848 2842
2849 2843 try:
2850 2844 if deltareuse == self.DELTAREUSEALWAYS:
2851 2845 destrevlog._lazydeltabase = True
2852 2846 destrevlog._lazydelta = True
2853 2847 elif deltareuse == self.DELTAREUSESAMEREVS:
2854 2848 destrevlog._lazydeltabase = False
2855 2849 destrevlog._lazydelta = True
2856 2850 elif deltareuse == self.DELTAREUSENEVER:
2857 2851 destrevlog._lazydeltabase = False
2858 2852 destrevlog._lazydelta = False
2859 2853
2860 2854 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2861 2855
2862 2856 self._clone(
2863 2857 tr,
2864 2858 destrevlog,
2865 2859 addrevisioncb,
2866 2860 deltareuse,
2867 2861 forcedeltabothparents,
2868 2862 sidedatacompanion,
2869 2863 )
2870 2864
2871 2865 finally:
2872 2866 destrevlog._lazydelta = oldlazydelta
2873 2867 destrevlog._lazydeltabase = oldlazydeltabase
2874 2868 destrevlog._deltabothparents = oldamd
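# Hedged usage sketch (names are illustrative; `tr` is an open transaction
# and `dest` is an empty revlog created alongside this one):
#
#   src.clone(tr, dest, deltareuse=src.DELTAREUSENEVER)
#
# forces every delta to be recomputed under the destination's own delta
# configuration, at the cost of the slowest clone mode.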
2875 2869
2876 2870 def _clone(
2877 2871 self,
2878 2872 tr,
2879 2873 destrevlog,
2880 2874 addrevisioncb,
2881 2875 deltareuse,
2882 2876 forcedeltabothparents,
2883 2877 sidedatacompanion,
2884 2878 ):
2885 2879 """perform the core duty of `revlog.clone` after parameter processing"""
2886 2880 deltacomputer = deltautil.deltacomputer(destrevlog)
2887 2881 index = self.index
2888 2882 for rev in self:
2889 2883 entry = index[rev]
2890 2884
2891 2885 # Some classes override linkrev to take filtered revs into
2892 2886 # account. Use raw entry from index.
2893 2887 flags = entry[0] & 0xFFFF
2894 2888 linkrev = entry[4]
2895 2889 p1 = index[entry[5]][7]
2896 2890 p2 = index[entry[6]][7]
2897 2891 node = entry[7]
2898 2892
2899 2893 sidedataactions = (False, [], {}, 0, 0)
2900 2894 if sidedatacompanion is not None:
2901 2895 sidedataactions = sidedatacompanion(self, rev)
2902 2896
2903 2897 # (Possibly) reuse the delta from the revlog if allowed and
2904 2898 # the revlog chunk is a delta.
2905 2899 cachedelta = None
2906 2900 rawtext = None
2907 2901 if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
2908 2902 dropall = sidedataactions[0]
2909 2903 filterout = sidedataactions[1]
2910 2904 update = sidedataactions[2]
2911 2905 new_flags = sidedataactions[3]
2912 2906 dropped_flags = sidedataactions[4]
2913 2907 text, sidedata = self._revisiondata(rev)
2914 2908 if dropall:
2915 2909 sidedata = {}
2916 2910 for key in filterout:
2917 2911 sidedata.pop(key, None)
2918 2912 sidedata.update(update)
2919 2913 if not sidedata:
2920 2914 sidedata = None
2921 2915
2922 2916 flags |= new_flags
2923 2917 flags &= ~dropped_flags
2924 2918
2925 2919 destrevlog.addrevision(
2926 2920 text,
2927 2921 tr,
2928 2922 linkrev,
2929 2923 p1,
2930 2924 p2,
2931 2925 cachedelta=cachedelta,
2932 2926 node=node,
2933 2927 flags=flags,
2934 2928 deltacomputer=deltacomputer,
2935 2929 sidedata=sidedata,
2936 2930 )
2937 2931 else:
2938 2932 if destrevlog._lazydelta:
2939 2933 dp = self.deltaparent(rev)
2940 2934 if dp != nullrev:
2941 2935 cachedelta = (dp, bytes(self._chunk(rev)))
2942 2936
2943 2937 if not cachedelta:
2944 2938 rawtext = self.rawdata(rev)
2945 2939
2946 2940 ifh = destrevlog.opener(
2947 2941 destrevlog.indexfile, b'a+', checkambig=False
2948 2942 )
2949 2943 dfh = None
2950 2944 if not destrevlog._inline:
2951 2945 dfh = destrevlog.opener(destrevlog.datafile, b'a+')
2952 2946 try:
2953 2947 destrevlog._addrevision(
2954 2948 node,
2955 2949 rawtext,
2956 2950 tr,
2957 2951 linkrev,
2958 2952 p1,
2959 2953 p2,
2960 2954 flags,
2961 2955 cachedelta,
2962 2956 ifh,
2963 2957 dfh,
2964 2958 deltacomputer=deltacomputer,
2965 2959 )
2966 2960 finally:
2967 2961 if dfh:
2968 2962 dfh.close()
2969 2963 ifh.close()
2970 2964
2971 2965 if addrevisioncb:
2972 2966 addrevisioncb(self, rev, node)
2973 2967
2974 2968 def censorrevision(self, tr, censornode, tombstone=b''):
2975 2969 if (self.version & 0xFFFF) == REVLOGV0:
2976 2970 raise error.RevlogError(
2977 2971 _(b'cannot censor with version %d revlogs') % self.version
2978 2972 )
2979 2973
2980 2974 censorrev = self.rev(censornode)
2981 2975 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2982 2976
2983 2977 if len(tombstone) > self.rawsize(censorrev):
2984 2978 raise error.Abort(
2985 2979 _(b'censor tombstone must be no longer than censored data')
2986 2980 )
2987 2981
2988 2982 # Rewriting the revlog in place is hard. Our strategy for censoring is
2989 2983 # to create a new revlog, copy all revisions to it, then replace the
2990 2984 # revlogs on transaction close.
2991 2985
2992 2986 newindexfile = self.indexfile + b'.tmpcensored'
2993 2987 newdatafile = self.datafile + b'.tmpcensored'
2994 2988
2995 2989 # This is a bit dangerous. We could easily have a mismatch of state.
2996 2990 newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
2997 2991 newrl.version = self.version
2998 2992 newrl._generaldelta = self._generaldelta
2999 2993 newrl._io = self._io
3000 2994
3001 2995 for rev in self.revs():
3002 2996 node = self.node(rev)
3003 2997 p1, p2 = self.parents(node)
3004 2998
3005 2999 if rev == censorrev:
3006 3000 newrl.addrawrevision(
3007 3001 tombstone,
3008 3002 tr,
3009 3003 self.linkrev(censorrev),
3010 3004 p1,
3011 3005 p2,
3012 3006 censornode,
3013 3007 REVIDX_ISCENSORED,
3014 3008 )
3015 3009
3016 3010 if newrl.deltaparent(rev) != nullrev:
3017 3011 raise error.Abort(
3018 3012 _(
3019 3013 b'censored revision stored as delta; '
3020 3014 b'cannot censor'
3021 3015 ),
3022 3016 hint=_(
3023 3017 b'censoring of revlogs is not '
3024 3018 b'fully implemented; please report '
3025 3019 b'this bug'
3026 3020 ),
3027 3021 )
3028 3022 continue
3029 3023
3030 3024 if self.iscensored(rev):
3031 3025 if self.deltaparent(rev) != nullrev:
3032 3026 raise error.Abort(
3033 3027 _(
3034 3028 b'cannot censor due to censored '
3035 3029 b'revision having delta stored'
3036 3030 )
3037 3031 )
3038 3032 rawtext = self._chunk(rev)
3039 3033 else:
3040 3034 rawtext = self.rawdata(rev)
3041 3035
3042 3036 newrl.addrawrevision(
3043 3037 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3044 3038 )
3045 3039
3046 3040 tr.addbackup(self.indexfile, location=b'store')
3047 3041 if not self._inline:
3048 3042 tr.addbackup(self.datafile, location=b'store')
3049 3043
3050 3044 self.opener.rename(newrl.indexfile, self.indexfile)
3051 3045 if not self._inline:
3052 3046 self.opener.rename(newrl.datafile, self.datafile)
3053 3047
3054 3048 self.clearcaches()
3055 3049 self._loadindex()
3056 3050
3057 3051 def verifyintegrity(self, state):
3058 3052 """Verifies the integrity of the revlog.
3059 3053
3060 3054 Yields ``revlogproblem`` instances describing problems that are
3061 3055 found.
3062 3056 """
3063 3057 dd, di = self.checksize()
3064 3058 if dd:
3065 3059 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3066 3060 if di:
3067 3061 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3068 3062
3069 3063 version = self.version & 0xFFFF
3070 3064
3071 3065 # The verifier tells us what version revlog we should be.
3072 3066 if version != state[b'expectedversion']:
3073 3067 yield revlogproblem(
3074 3068 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3075 3069 % (self.indexfile, version, state[b'expectedversion'])
3076 3070 )
3077 3071
3078 3072 state[b'skipread'] = set()
3079 3073 state[b'safe_renamed'] = set()
3080 3074
3081 3075 for rev in self:
3082 3076 node = self.node(rev)
3083 3077
3084 3078 # Verify contents. 4 cases to care about:
3085 3079 #
3086 3080 # common: the most common case
3087 3081 # rename: with a rename
3088 3082 # meta: file content starts with b'\1\n', the metadata
3089 3083 # header defined in filelog.py, but without a rename
3090 3084 # ext: content stored externally
3091 3085 #
3092 3086 # More formally, their differences are shown below:
3093 3087 #
3094 3088 # | common | rename | meta | ext
3095 3089 # -------------------------------------------------------
3096 3090 # flags() | 0 | 0 | 0 | not 0
3097 3091 # renamed() | False | True | False | ?
3098 3092 # rawtext[0:2]=='\1\n'| False | True | True | ?
3099 3093 #
3100 3094 # "rawtext" means the raw text stored in revlog data, which
3101 3095 # could be retrieved by "rawdata(rev)". "text"
3102 3096 # mentioned below is "revision(rev)".
3103 3097 #
3104 3098 # There are 3 different lengths stored physically:
3105 3099 # 1. L1: rawsize, stored in revlog index
3106 3100 # 2. L2: len(rawtext), stored in revlog data
3107 3101 # 3. L3: len(text), stored in revlog data if flags==0, or
3108 3102 # possibly somewhere else if flags!=0
3109 3103 #
3110 3104 # L1 should be equal to L2. L3 could be different from them.
3111 3105 # "text" may or may not affect commit hash depending on flag
3112 3106 # processors (see flagutil.addflagprocessor).
3113 3107 #
3114 3108 # | common | rename | meta | ext
3115 3109 # -------------------------------------------------
3116 3110 # rawsize() | L1 | L1 | L1 | L1
3117 3111 # size() | L1 | L2-LM | L1(*) | L1 (?)
3118 3112 # len(rawtext) | L2 | L2 | L2 | L2
3119 3113 # len(text) | L2 | L2 | L2 | L3
3120 3114 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3121 3115 #
3122 3116 # LM: length of metadata, depending on rawtext
3123 3117 # (*): not ideal, see comment in filelog.size
3124 3118 # (?): could be "- len(meta)" if the resolved content has
3125 3119 # rename metadata
3126 3120 #
3127 3121 # Checks needed to be done:
3128 3122 # 1. length check: L1 == L2, in all cases.
3129 3123 # 2. hash check: depending on flag processor, we may need to
3130 3124 # use either "text" (external), or "rawtext" (in revlog).
3131 3125
3132 3126 try:
3133 3127 skipflags = state.get(b'skipflags', 0)
3134 3128 if skipflags:
3135 3129 skipflags &= self.flags(rev)
3136 3130
3137 3131 _verify_revision(self, skipflags, state, node)
3138 3132
3139 3133 l1 = self.rawsize(rev)
3140 3134 l2 = len(self.rawdata(node))
3141 3135
3142 3136 if l1 != l2:
3143 3137 yield revlogproblem(
3144 3138 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3145 3139 node=node,
3146 3140 )
3147 3141
3148 3142 except error.CensoredNodeError:
3149 3143 if state[b'erroroncensored']:
3150 3144 yield revlogproblem(
3151 3145 error=_(b'censored file data'), node=node
3152 3146 )
3153 3147 state[b'skipread'].add(node)
3154 3148 except Exception as e:
3155 3149 yield revlogproblem(
3156 3150 error=_(b'unpacking %s: %s')
3157 3151 % (short(node), stringutil.forcebytestr(e)),
3158 3152 node=node,
3159 3153 )
3160 3154 state[b'skipread'].add(node)
3161 3155
3162 3156 def storageinfo(
3163 3157 self,
3164 3158 exclusivefiles=False,
3165 3159 sharedfiles=False,
3166 3160 revisionscount=False,
3167 3161 trackedsize=False,
3168 3162 storedsize=False,
3169 3163 ):
3170 3164 d = {}
3171 3165
3172 3166 if exclusivefiles:
3173 3167 d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
3174 3168 if not self._inline:
3175 3169 d[b'exclusivefiles'].append((self.opener, self.datafile))
3176 3170
3177 3171 if sharedfiles:
3178 3172 d[b'sharedfiles'] = []
3179 3173
3180 3174 if revisionscount:
3181 3175 d[b'revisionscount'] = len(self)
3182 3176
3183 3177 if trackedsize:
3184 3178 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3185 3179
3186 3180 if storedsize:
3187 3181 d[b'storedsize'] = sum(
3188 3182 self.opener.stat(path).st_size for path in self.files()
3189 3183 )
3190 3184
3191 3185 return d
3192 3186
3193 3187 def rewrite_sidedata(self, helpers, startrev, endrev):
3194 3188 if self.version & 0xFFFF != REVLOGV2:
3195 3189 return
3196 3190 # inline revlogs are not yet supported because they suffer from an issue when
3197 3191 # rewriting them (since it's not an append-only operation).
3198 3192 # See issue6485.
3199 3193 assert not self._inline
3200 3194 if not helpers[1] and not helpers[2]:
3201 3195 # Nothing to generate or remove
3202 3196 return
3203 3197
3204 3198 new_entries = []
3205 3199 # append the new sidedata
3206 3200 with self._datafp(b'a+') as fp:
3207 3201 # Maybe this bug still exists, see revlog._writeentry
3208 3202 fp.seek(0, os.SEEK_END)
3209 3203 current_offset = fp.tell()
3210 3204 for rev in range(startrev, endrev + 1):
3211 3205 entry = self.index[rev]
3212 3206 new_sidedata = storageutil.run_sidedata_helpers(
3213 3207 store=self,
3214 3208 sidedata_helpers=helpers,
3215 3209 sidedata={},
3216 3210 rev=rev,
3217 3211 )
3218 3212
3219 3213 serialized_sidedata = sidedatautil.serialize_sidedata(
3220 3214 new_sidedata
3221 3215 )
3222 3216 if entry[8] != 0 or entry[9] != 0:
3223 3217 # rewriting entries that already have sidedata is not
3224 3218 # supported yet, because it introduces garbage data in the
3225 3219 # revlog.
3226 3220 msg = b"Rewriting existing sidedata is not supported yet"
3227 3221 raise error.Abort(msg)
3228 3222 entry = entry[:8]
3229 3223 entry += (current_offset, len(serialized_sidedata))
3230 3224
3231 3225 fp.write(serialized_sidedata)
3232 3226 new_entries.append(entry)
3233 3227 current_offset += len(serialized_sidedata)
3234 3228
3235 3229 # rewrite the new index entries
3236 3230 with self._indexfp(b'w+') as fp:
3237 3231 fp.seek(startrev * self.index.entry_size)
3238 3232 for i, entry in enumerate(new_entries):
3239 3233 rev = startrev + i
3240 3234 self.index.replace_sidedata_info(rev, entry[8], entry[9])
3241 3235 packed = self._io.packentry(entry, self.node, self.version, rev)
3242 3236 fp.write(packed)
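A note for readers following the index bookkeeping in rewrite_sidedata above: each version-2 index entry reserves two trailing slots (positions 8 and 9) for the sidedata offset and length, and because the data file is only ever appended to, relocating a revision's sidedata means writing a fresh blob at EOF and pointing the fixed-size index entry at it. A minimal sketch of that bookkeeping, using a hypothetical helper name (not part of Mercurial's API):

    import os

    def append_sidedata(datafp, index_entry, serialized):
        # hypothetical helper mirroring the loop above: append the
        # serialized blob at EOF, then record (offset, length) in the
        # entry's sidedata slots (positions 8 and 9 of a v2 entry tuple)
        datafp.seek(0, os.SEEK_END)
        offset = datafp.tell()
        datafp.write(serialized)
        return index_entry[:8] + (offset, len(serialized))

Because every index entry has a fixed size, the rewritten entries can then be packed and written back in place starting at startrev * entry_size, which is exactly what the second loop above does.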
@@ -1,171 +1,168 b''
1 1 ===============================================================
2 2 Test non-regression on the corruption associated with issue6528
3 3 ===============================================================
4 4
5 5 Setup
6 6 -----
7 7
8 8 $ hg init base-repo
9 9 $ cd base-repo
10 10
11 11 $ cat <<EOF > a.txt
12 12 > 1
13 13 > 2
14 14 > 3
15 15 > 4
16 16 > 5
17 17 > 6
18 18 > EOF
19 19
20 20 $ hg add a.txt
21 21 $ hg commit -m 'c_base_c - create a.txt'
22 22
23 23 Modify a.txt
24 24
25 25 $ sed -e 's/1/foo/' a.txt > a.tmp; mv a.tmp a.txt
26 26 $ hg commit -m 'c_modify_c - modify a.txt'
27 27
28 28 Modify and rename a.txt to b.txt
29 29
30 30 $ hg up -r "desc('c_base_c')"
31 31 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
32 32 $ sed -e 's/6/bar/' a.txt > a.tmp; mv a.tmp a.txt
33 33 $ hg mv a.txt b.txt
34 34 $ hg commit -m 'c_rename_c - rename and modify a.txt to b.txt'
35 35 created new head
36 36
37 37 Merge each branch
38 38
39 39 $ hg merge -r "desc('c_modify_c')"
40 40 merging b.txt and a.txt to b.txt
41 41 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
42 42 (branch merge, don't forget to commit)
43 43 $ hg commit -m 'c_merge_c: commit merge'
44 44
45 45 $ hg debugrevlogindex b.txt
46 46 rev linkrev nodeid p1 p2
47 47 0 2 05b806ebe5ea 000000000000 000000000000
48 48 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
49 49
50 50 Check commit graph
51 51
52 52 $ hg log -G
53 53 @ changeset: 3:a1cc2bdca0aa
54 54 |\ tag: tip
55 55 | | parent: 2:615c6ccefd15
56 56 | | parent: 1:373d507f4667
57 57 | | user: test
58 58 | | date: Thu Jan 01 00:00:00 1970 +0000
59 59 | | summary: c_merge_c: commit merge
60 60 | |
61 61 | o changeset: 2:615c6ccefd15
62 62 | | parent: 0:f5a5a568022f
63 63 | | user: test
64 64 | | date: Thu Jan 01 00:00:00 1970 +0000
65 65 | | summary: c_rename_c - rename and modify a.txt to b.txt
66 66 | |
67 67 o | changeset: 1:373d507f4667
68 68 |/ user: test
69 69 | date: Thu Jan 01 00:00:00 1970 +0000
70 70 | summary: c_modify_c - modify a.txt
71 71 |
72 72 o changeset: 0:f5a5a568022f
73 73 user: test
74 74 date: Thu Jan 01 00:00:00 1970 +0000
75 75 summary: c_base_c - create a.txt
76 76
77 77
78 78 $ hg cat -r . b.txt
79 79 foo
80 80 2
81 81 3
82 82 4
83 83 5
84 84 bar
85 85 $ cat b.txt
86 86 foo
87 87 2
88 88 3
89 89 4
90 90 5
91 91 bar
92 92 $ cd ..
93 93
94 94
95 95 Check the lack of corruption
96 96 ----------------------------
97 97
98 98 $ hg clone --pull base-repo cloned
99 99 requesting all changes
100 100 adding changesets
101 101 adding manifests
102 102 adding file changes
103 103 added 4 changesets with 4 changes to 2 files
104 104 new changesets f5a5a568022f:a1cc2bdca0aa
105 105 updating to branch default
106 106 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
107 107 $ cd cloned
108 108 $ hg up -r "desc('c_merge_c')"
109 109 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
110 110
111 111
112 112 Status is correct, with or without debugrebuilddirstate
113 113
114 114 $ hg cat -r . b.txt
115 115 foo
116 116 2
117 117 3
118 118 4
119 119 5
120 120 bar
121 121 $ cat b.txt
122 122 foo
123 123 2
124 124 3
125 125 4
126 126 5
127 127 bar
128 128 $ hg status
129 M b.txt (known-bad-output !)
130 129 $ hg debugrebuilddirstate
131 130 $ hg status
132 M b.txt (known-bad-output !)
133 131
134 132 the history is no longer altered
135 133
136 134 in theory p1/p2 order does not matter, but in practice p1 == nullid is used
137 135 as a marker that some metadata is present and should be fetched.
138 136
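That marker convention can be made concrete; here is a minimal sketch (the helper name is an illustrative assumption, but the p1 check and the \x01\n-delimited metadata header are the actual filelog conventions):

    from mercurial.node import nullid

    def may_have_copy_metadata(filelog, node):
        # a filelog revision carrying metadata (e.g. the copy/copyrev
        # header recorded for renames) keeps p1 at nullid and stores any
        # real parent in p2; swapping the parents loses that hint, so
        # the metadata is never looked up, which is the corruption this
        # test guards against
        p1, p2 = filelog.parents(node)
        if p1 != nullid:
            return False
        # metadata, when present, is a b'\x01\n'...b'\x01\n' prefix
        return filelog.rawdata(node).startswith(b'\x01\n')

This is why rev 1 of b.txt must read p1 = 000000000000, p2 = 05b806ebe5ea below, and why the swapped order was a real corruption rather than a cosmetic difference.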
139 137 $ hg debugrevlogindex b.txt
140 138 rev linkrev nodeid p1 p2
141 139 0 2 05b806ebe5ea 000000000000 000000000000
142 1 3 a58b36ad6b65 05b806ebe5ea 000000000000 (known-bad-output !)
143 1 3 a58b36ad6b65 000000000000 05b806ebe5ea (missing-correct-output !)
140 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
144 141
145 142 Check commit graph
146 143
147 144 $ hg log -G
148 145 @ changeset: 3:a1cc2bdca0aa
149 146 |\ tag: tip
150 147 | | parent: 2:615c6ccefd15
151 148 | | parent: 1:373d507f4667
152 149 | | user: test
153 150 | | date: Thu Jan 01 00:00:00 1970 +0000
154 151 | | summary: c_merge_c: commit merge
155 152 | |
156 153 | o changeset: 2:615c6ccefd15
157 154 | | parent: 0:f5a5a568022f
158 155 | | user: test
159 156 | | date: Thu Jan 01 00:00:00 1970 +0000
160 157 | | summary: c_rename_c - rename and modify a.txt to b.txt
161 158 | |
162 159 o | changeset: 1:373d507f4667
163 160 |/ user: test
164 161 | date: Thu Jan 01 00:00:00 1970 +0000
165 162 | summary: c_modify_c - modify a.txt
166 163 |
167 164 o changeset: 0:f5a5a568022f
168 165 user: test
169 166 date: Thu Jan 01 00:00:00 1970 +0000
170 167 summary: c_base_c - create a.txt
171 168
@@ -1,347 +1,347 b''
1 1 #require no-reposimplestore
2 2
3 3 $ . "$TESTDIR/narrow-library.sh"
4 4
5 5 create full repo
6 6
7 7 $ hg init master
8 8 $ cd master
9 9 $ cat >> .hg/hgrc <<EOF
10 10 > [narrow]
11 11 > serveellipses=True
12 12 > EOF
13 13
14 14 $ mkdir inside
15 15 $ echo 1 > inside/f
16 16 $ hg commit -Aqm 'initial inside'
17 17
18 18 $ mkdir outside
19 19 $ echo 1 > outside/f
20 20 $ hg commit -Aqm 'initial outside'
21 21
22 22 $ echo 2a > outside/f
23 23 $ hg commit -Aqm 'outside 2a'
24 24 $ echo 3 > inside/f
25 25 $ hg commit -Aqm 'inside 3'
26 26 $ echo 4a > outside/f
27 27 $ hg commit -Aqm 'outside 4a'
28 28 $ hg update '.~3'
29 29 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 $ echo 2b > outside/f
32 32 $ hg commit -Aqm 'outside 2b'
33 33 $ echo 3 > inside/f
34 34 $ hg commit -Aqm 'inside 3'
35 35 $ echo 4b > outside/f
36 36 $ hg commit -Aqm 'outside 4b'
37 37 $ hg update '.~3'
38 38 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
39 39
40 40 $ echo 2c > outside/f
41 41 $ hg commit -Aqm 'outside 2c'
42 42 $ echo 3 > inside/f
43 43 $ hg commit -Aqm 'inside 3'
44 44 $ echo 4c > outside/f
45 45 $ hg commit -Aqm 'outside 4c'
46 46 $ hg update '.~3'
47 47 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 48
49 49 $ echo 2d > outside/f
50 50 $ hg commit -Aqm 'outside 2d'
51 51 $ echo 3 > inside/f
52 52 $ hg commit -Aqm 'inside 3'
53 53 $ echo 4d > outside/f
54 54 $ hg commit -Aqm 'outside 4d'
55 55
56 56 $ hg update -r 'desc("outside 4a")'
57 57 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 58 $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)'
59 59 merging outside/f
60 60 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
61 61 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
62 62 $ echo 5 > outside/f
63 63 $ rm outside/f.orig
64 64 $ hg resolve --mark outside/f
65 65 (no more unresolved files)
66 66 $ hg commit -m 'merge a/b 5'
67 67 $ echo 6 > outside/f
68 68 $ hg commit -Aqm 'outside 6'
69 69
70 70 $ hg merge -r 'desc("outside 4c")' 2>&1 | egrep -v '(warning:|incomplete!)'
71 71 merging outside/f
72 72 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
73 73 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
74 74 $ echo 7 > outside/f
75 75 $ rm outside/f.orig
76 76 $ hg resolve --mark outside/f
77 77 (no more unresolved files)
78 78 $ hg commit -Aqm 'merge a/b/c 7'
79 79 $ echo 8 > outside/f
80 80 $ hg commit -Aqm 'outside 8'
81 81
82 82 $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)'
83 83 merging outside/f
84 84 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
85 85 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
86 86 $ echo 9 > outside/f
87 87 $ rm outside/f.orig
88 88 $ hg resolve --mark outside/f
89 89 (no more unresolved files)
90 90 $ hg commit -Aqm 'merge a/b/c/d 9'
91 91 $ echo 10 > outside/f
92 92 $ hg commit -Aqm 'outside 10'
93 93
94 94 $ echo 11 > inside/f
95 95 $ hg commit -Aqm 'inside 11'
96 96 $ echo 12 > outside/f
97 97 $ hg commit -Aqm 'outside 12'
98 98
99 99 $ hg log -G -T '{rev} {node|short} {desc}\n'
100 100 @ 21 8d874d57adea outside 12
101 101 |
102 102 o 20 7ef88b4dd4fa inside 11
103 103 |
104 104 o 19 2a20009de83e outside 10
105 105 |
106 106 o 18 3ac1f5779de3 merge a/b/c/d 9
107 107 |\
108 108 | o 17 38a9c2f7e546 outside 8
109 109 | |
110 110 | o 16 094aa62fc898 merge a/b/c 7
111 111 | |\
112 112 | | o 15 f29d083d32e4 outside 6
113 113 | | |
114 114 | | o 14 2dc11382541d merge a/b 5
115 115 | | |\
116 116 o | | | 13 27d07ef97221 outside 4d
117 117 | | | |
118 118 o | | | 12 465567bdfb2d inside 3
119 119 | | | |
120 120 o | | | 11 d1c61993ec83 outside 2d
121 121 | | | |
122 122 | o | | 10 56859a8e33b9 outside 4c
123 123 | | | |
124 124 | o | | 9 bb96a08b062a inside 3
125 125 | | | |
126 126 | o | | 8 b844052e7b3b outside 2c
127 127 |/ / /
128 128 | | o 7 9db2d8fcc2a6 outside 4b
129 129 | | |
130 130 | | o 6 6418167787a6 inside 3
131 131 | | |
132 132 +---o 5 77344f344d83 outside 2b
133 133 | |
134 134 | o 4 9cadde08dc9f outside 4a
135 135 | |
136 136 | o 3 019ef06f125b inside 3
137 137 | |
138 138 | o 2 75e40c075a19 outside 2a
139 139 |/
140 140 o 1 906d6c682641 initial outside
141 141 |
142 142 o 0 9f8e82b51004 initial inside
143 143
144 144
145 145 Now narrow and shallow clone this and get a hopefully correct graph
146 146
147 147 $ cd ..
148 148 $ hg clone --narrow ssh://user@dummy/master narrow --include inside --depth 7
149 149 requesting all changes
150 150 adding changesets
151 151 adding manifests
152 152 adding file changes
153 153 added 8 changesets with 3 changes to 1 files
154 154 new changesets *:* (glob)
155 155 updating to branch default
156 156 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
157 157 $ cd narrow
158 158
159 159 To make updating the tests easier, we print the emitted nodes
160 160 sorted. Sorting makes it easy to recognize when the same node
161 161 structure has been emitted, just in a different order.
162 162
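A tiny sketch of the comparison this buys us (the function name is illustrative, not part of the test harness):

    def same_emitted_nodes(output_a, output_b):
        # order-insensitive comparison: two runs that emit the same
        # node lines in different orders still compare equal
        return sorted(output_a.splitlines()) == sorted(output_b.splitlines())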
163 163 $ hg log -G -T '{rev} {node|short}{if(ellipsis,"...")} {desc}\n'
164 164 @ 7 8d874d57adea... outside 12
165 165 |
166 166 o 6 7ef88b4dd4fa inside 11
167 167 |
168 168 o 5 2a20009de83e... outside 10
169 169 |
170 170 o 4 3ac1f5779de3... merge a/b/c/d 9
171 171 |\
172 172 | o 3 465567bdfb2d inside 3
173 173 | |
174 174 | o 2 d1c61993ec83... outside 2d
175 175 |
176 176 o 1 bb96a08b062a inside 3
177 177 |
178 178 o 0 b844052e7b3b... outside 2c
179 179
180 180
181 181 $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
182 ...2a20009de83e 3ac1f5779de3 000000000000 outside 10
182 ...2a20009de83e 000000000000 3ac1f5779de3 outside 10
183 183 ...3ac1f5779de3 bb96a08b062a 465567bdfb2d merge a/b/c/d 9
184 184 ...8d874d57adea 7ef88b4dd4fa 000000000000 outside 12
185 185 ...b844052e7b3b 000000000000 000000000000 outside 2c
186 186 ...d1c61993ec83 000000000000 000000000000 outside 2d
187 187 465567bdfb2d d1c61993ec83 000000000000 inside 3
188 188 7ef88b4dd4fa 2a20009de83e 000000000000 inside 11
189 189 bb96a08b062a b844052e7b3b 000000000000 inside 3
190 190
191 191 $ cd ..
192 192
193 193 Incremental test case: show that a pull can bring in a conflicted merge even if it is elided
194 194
195 195 $ hg init pullmaster
196 196 $ cd pullmaster
197 197 $ cat >> .hg/hgrc <<EOF
198 198 > [narrow]
199 199 > serveellipses=True
200 200 > EOF
201 201 $ mkdir inside outside
202 202 $ echo v1 > inside/f
203 203 $ echo v1 > outside/f
204 204 $ hg add inside/f outside/f
205 205 $ hg commit -m init
206 206
207 207 $ for line in a b c d
208 208 > do
209 209 > hg update -r 0
210 210 > echo v2$line > outside/f
211 211 > hg commit -m "outside 2$line"
212 212 > echo v2$line > inside/f
213 213 > hg commit -m "inside 2$line"
214 214 > echo v3$line > outside/f
215 215 > hg commit -m "outside 3$line"
216 216 > echo v4$line > outside/f
217 217 > hg commit -m "outside 4$line"
218 218 > done
219 219 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
220 220 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
221 221 created new head
222 222 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
223 223 created new head
224 224 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
225 225 created new head
226 226
227 227 $ cd ..
228 228 $ hg clone --narrow ssh://user@dummy/pullmaster pullshallow \
229 229 > --include inside --depth 3
230 230 requesting all changes
231 231 adding changesets
232 232 adding manifests
233 233 adding file changes
234 234 added 12 changesets with 5 changes to 1 files (+3 heads)
235 235 new changesets *:* (glob)
236 236 updating to branch default
237 237 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
238 238 $ cd pullshallow
239 239
240 240 $ hg log -G -T '{rev} {node|short}{if(ellipsis,"...")} {desc}\n'
241 241 @ 11 0ebbd712a0c8... outside 4d
242 242 |
243 243 o 10 0d4c867aeb23 inside 2d
244 244 |
245 245 o 9 e932969c3961... outside 2d
246 246
247 247 o 8 33d530345455... outside 4c
248 248 |
249 249 o 7 0ce6481bfe07 inside 2c
250 250 |
251 251 o 6 caa65c940632... outside 2c
252 252
253 253 o 5 3df233defecc... outside 4b
254 254 |
255 255 o 4 7162cc6d11a4 inside 2b
256 256 |
257 257 o 3 f2a632f0082d... outside 2b
258 258
259 259 o 2 b8a3da16ba49... outside 4a
260 260 |
261 261 o 1 53f543eb8e45 inside 2a
262 262 |
263 263 o 0 1be3e5221c6a... outside 2a
264 264
265 265 $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
266 266 ...0ebbd712a0c8 0d4c867aeb23 000000000000 outside 4d
267 267 ...1be3e5221c6a 000000000000 000000000000 outside 2a
268 268 ...33d530345455 0ce6481bfe07 000000000000 outside 4c
269 269 ...3df233defecc 7162cc6d11a4 000000000000 outside 4b
270 270 ...b8a3da16ba49 53f543eb8e45 000000000000 outside 4a
271 271 ...caa65c940632 000000000000 000000000000 outside 2c
272 272 ...e932969c3961 000000000000 000000000000 outside 2d
273 273 ...f2a632f0082d 000000000000 000000000000 outside 2b
274 274 0ce6481bfe07 caa65c940632 000000000000 inside 2c
275 275 0d4c867aeb23 e932969c3961 000000000000 inside 2d
276 276 53f543eb8e45 1be3e5221c6a 000000000000 inside 2a
277 277 7162cc6d11a4 f2a632f0082d 000000000000 inside 2b
278 278
279 279 $ cd ../pullmaster
280 280 $ hg update -r 'desc("outside 4a")'
281 281 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
282 282 $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)'
283 283 merging inside/f
284 284 merging outside/f
285 285 0 files updated, 0 files merged, 0 files removed, 2 files unresolved
286 286 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
287 287 $ echo 3 > inside/f
288 288 $ echo 5 > outside/f
289 289 $ rm -f {in,out}side/f.orig
290 290 $ hg resolve --mark inside/f outside/f
291 291 (no more unresolved files)
292 292 $ hg commit -m 'merge a/b 5'
293 293
294 294 $ hg update -r 'desc("outside 4c")'
295 295 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
296 296 $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)'
297 297 merging inside/f
298 298 merging outside/f
299 299 0 files updated, 0 files merged, 0 files removed, 2 files unresolved
300 300 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
301 301 $ echo 3 > inside/f
302 302 $ echo 5 > outside/f
303 303 $ rm -f {in,out}side/f.orig
304 304 $ hg resolve --mark inside/f outside/f
305 305 (no more unresolved files)
306 306 $ hg commit -m 'merge c/d 5'
307 307
308 308 $ hg update -r 'desc("merge a/b 5")'
309 309 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
310 310 $ hg merge -r 'desc("merge c/d 5")'
311 311 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
312 312 (branch merge, don't forget to commit)
313 313 $ echo 6 > outside/f
314 314 $ hg commit -m 'outside 6'
315 315 $ echo 7 > outside/f
316 316 $ hg commit -m 'outside 7'
317 317 $ echo 8 > outside/f
318 318 $ hg commit -m 'outside 8'
319 319
320 320 $ cd ../pullshallow
321 321 $ hg pull --depth 3
322 322 pulling from ssh://user@dummy/pullmaster
323 323 searching for changes
324 324 adding changesets
325 325 adding manifests
326 326 adding file changes
327 327 added 4 changesets with 3 changes to 1 files (-3 heads)
328 328 new changesets *:* (glob)
329 329 (run 'hg update' to get a working copy)
330 330
331 331 $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
332 332 ...0ebbd712a0c8 0d4c867aeb23 000000000000 outside 4d
333 333 ...1be3e5221c6a 000000000000 000000000000 outside 2a
334 334 ...33d530345455 0ce6481bfe07 000000000000 outside 4c
335 335 ...3df233defecc 7162cc6d11a4 000000000000 outside 4b
336 336 ...b8a3da16ba49 53f543eb8e45 000000000000 outside 4a
337 337 ...bf545653453e 968003d40c60 000000000000 outside 8
338 338 ...caa65c940632 000000000000 000000000000 outside 2c
339 339 ...e932969c3961 000000000000 000000000000 outside 2d
340 340 ...f2a632f0082d 000000000000 000000000000 outside 2b
341 341 0ce6481bfe07 caa65c940632 000000000000 inside 2c
342 342 0d4c867aeb23 e932969c3961 000000000000 inside 2d
343 343 53f543eb8e45 1be3e5221c6a 000000000000 inside 2a
344 344 67d49c0bdbda b8a3da16ba49 3df233defecc merge a/b 5
345 345 7162cc6d11a4 f2a632f0082d 000000000000 inside 2b
346 346 968003d40c60 67d49c0bdbda e867021d52c2 outside 6
347 347 e867021d52c2 33d530345455 0ebbd712a0c8 merge c/d 5