revlog: open files in 'r+' instead of 'a+'...
marmoute
r47991:8f6165c9 default
@@ -1,3192 +1,3206 @@
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import binascii
17 17 import collections
18 18 import contextlib
19 19 import errno
20 20 import io
21 21 import os
22 22 import struct
23 23 import zlib
24 24
25 25 # import stuff from node for others to import from revlog
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullrev,
30 30 sha1nodeconstants,
31 31 short,
32 32 wdirrev,
33 33 )
34 34 from .i18n import _
35 35 from .pycompat import getattr
36 36 from .revlogutils.constants import (
37 37 ALL_KINDS,
38 38 FLAG_GENERALDELTA,
39 39 FLAG_INLINE_DATA,
40 40 INDEX_HEADER,
41 41 REVLOGV0,
42 42 REVLOGV1,
43 43 REVLOGV1_FLAGS,
44 44 REVLOGV2,
45 45 REVLOGV2_FLAGS,
46 46 REVLOG_DEFAULT_FLAGS,
47 47 REVLOG_DEFAULT_FORMAT,
48 48 REVLOG_DEFAULT_VERSION,
49 49 )
50 50 from .revlogutils.flagutil import (
51 51 REVIDX_DEFAULT_FLAGS,
52 52 REVIDX_ELLIPSIS,
53 53 REVIDX_EXTSTORED,
54 54 REVIDX_FLAGS_ORDER,
55 55 REVIDX_HASCOPIESINFO,
56 56 REVIDX_ISCENSORED,
57 57 REVIDX_RAWTEXT_CHANGING_FLAGS,
58 58 )
59 59 from .thirdparty import attr
60 60 from . import (
61 61 ancestor,
62 62 dagop,
63 63 error,
64 64 mdiff,
65 65 policy,
66 66 pycompat,
67 67 templatefilters,
68 68 util,
69 69 )
70 70 from .interfaces import (
71 71 repository,
72 72 util as interfaceutil,
73 73 )
74 74 from .revlogutils import (
75 75 deltas as deltautil,
76 76 flagutil,
77 77 nodemap as nodemaputil,
78 78 revlogv0,
79 79 sidedata as sidedatautil,
80 80 )
81 81 from .utils import (
82 82 storageutil,
83 83 stringutil,
84 84 )
85 85
86 86 # Blanket usage of all the names to silence pyflakes warnings.
87 87 # We need these names available in the module for extensions.
88 88
89 89 REVLOGV0
90 90 REVLOGV1
91 91 REVLOGV2
92 92 FLAG_INLINE_DATA
93 93 FLAG_GENERALDELTA
94 94 REVLOG_DEFAULT_FLAGS
95 95 REVLOG_DEFAULT_FORMAT
96 96 REVLOG_DEFAULT_VERSION
97 97 REVLOGV1_FLAGS
98 98 REVLOGV2_FLAGS
99 99 REVIDX_ISCENSORED
100 100 REVIDX_ELLIPSIS
101 101 REVIDX_HASCOPIESINFO
102 102 REVIDX_EXTSTORED
103 103 REVIDX_DEFAULT_FLAGS
104 104 REVIDX_FLAGS_ORDER
105 105 REVIDX_RAWTEXT_CHANGING_FLAGS
106 106
107 107 parsers = policy.importmod('parsers')
108 108 rustancestor = policy.importrust('ancestor')
109 109 rustdagop = policy.importrust('dagop')
110 110 rustrevlog = policy.importrust('revlog')
111 111
112 112 # Aliased for performance.
113 113 _zlibdecompress = zlib.decompress
114 114
115 115 # max size of revlog with inline data
116 116 _maxinline = 131072
117 117 _chunksize = 1048576
118 118
119 119 # Flag processors for REVIDX_ELLIPSIS.
120 120 def ellipsisreadprocessor(rl, text):
121 121 return text, False
122 122
123 123
124 124 def ellipsiswriteprocessor(rl, text):
125 125 return text, False
126 126
127 127
128 128 def ellipsisrawprocessor(rl, text):
129 129 return False
130 130
131 131
132 132 ellipsisprocessor = (
133 133 ellipsisreadprocessor,
134 134 ellipsiswriteprocessor,
135 135 ellipsisrawprocessor,
136 136 )
137 137
138 138
139 139 def offset_type(offset, type):
140 140 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
141 141 raise ValueError(b'unknown revlog index flags')
142 142 return int(int(offset) << 16 | type)
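# Illustrative sketch (editorial note, not part of the original file): the
# index packs a 48-bit byte offset and 16 bits of flags into one integer,
# so, assuming REVIDX_ISCENSORED (imported above) is among the known flags:
#
#   >>> offset_type(8192, 0) == 8192 << 16
#   True
#   >>> offset_type(0, REVIDX_ISCENSORED) & 0xFFFF == REVIDX_ISCENSORED
#   True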
143 143
144 144
145 145 def _verify_revision(rl, skipflags, state, node):
146 146 """Verify the integrity of the given revlog ``node`` while providing a hook
147 147 point for extensions to influence the operation."""
148 148 if skipflags:
149 149 state[b'skipread'].add(node)
150 150 else:
151 151 # Side-effect: read content and verify hash.
152 152 rl.revision(node)
153 153
154 154
155 155 # True if a fast implementation for persistent-nodemap is available
156 156 #
157 157 # We also consider the "pure" python implementation fast, because
158 158 # people using it don't really have performance considerations (and a
159 159 # wheelbarrow of other slowness sources)
160 160 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
161 161 parsers, 'BaseIndexObject'
162 162 )
163 163
164 164
165 165 @attr.s(slots=True, frozen=True)
166 166 class _revisioninfo(object):
167 167 """Information about a revision that allows building its fulltext
168 168 node: expected hash of the revision
169 169 p1, p2: parent revs of the revision
170 170 btext: built text cache consisting of a one-element list
171 171 cachedelta: (baserev, uncompressed_delta) or None
172 172 flags: flags associated with the revision storage
173 173
174 174 One of btext[0] or cachedelta must be set.
175 175 """
176 176
177 177 node = attr.ib()
178 178 p1 = attr.ib()
179 179 p2 = attr.ib()
180 180 btext = attr.ib()
181 181 textlen = attr.ib()
182 182 cachedelta = attr.ib()
183 183 flags = attr.ib()
184 184
185 185
186 186 @interfaceutil.implementer(repository.irevisiondelta)
187 187 @attr.s(slots=True)
188 188 class revlogrevisiondelta(object):
189 189 node = attr.ib()
190 190 p1node = attr.ib()
191 191 p2node = attr.ib()
192 192 basenode = attr.ib()
193 193 flags = attr.ib()
194 194 baserevisionsize = attr.ib()
195 195 revision = attr.ib()
196 196 delta = attr.ib()
197 197 sidedata = attr.ib()
198 198 protocol_flags = attr.ib()
199 199 linknode = attr.ib(default=None)
200 200
201 201
202 202 @interfaceutil.implementer(repository.iverifyproblem)
203 203 @attr.s(frozen=True)
204 204 class revlogproblem(object):
205 205 warning = attr.ib(default=None)
206 206 error = attr.ib(default=None)
207 207 node = attr.ib(default=None)
208 208
209 209
210 210 def parse_index_v1(data, inline):
211 211 # call the C implementation to parse the index data
212 212 index, cache = parsers.parse_index2(data, inline)
213 213 return index, cache
214 214
215 215
216 216 def parse_index_v2(data, inline):
217 217 # call the C implementation to parse the index data
218 218 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
219 219 return index, cache
220 220
221 221
222 222 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
223 223
224 224 def parse_index_v1_nodemap(data, inline):
225 225 index, cache = parsers.parse_index_devel_nodemap(data, inline)
226 226 return index, cache
227 227
228 228
229 229 else:
230 230 parse_index_v1_nodemap = None
231 231
232 232
233 233 def parse_index_v1_mixed(data, inline):
234 234 index, cache = parse_index_v1(data, inline)
235 235 return rustrevlog.MixedIndex(index), cache
236 236
237 237
238 238 # Maximum uncompressed entry length that indexformatng can record
239 239 # (2 GiB, stored as a 4-byte signed integer)
240 240 _maxentrysize = 0x7FFFFFFF
241 241
242 242
243 243 class revlog(object):
244 244 """
245 245 the underlying revision storage object
246 246
247 247 A revlog consists of two parts, an index and the revision data.
248 248
249 249 The index is a file with a fixed record size containing
250 250 information on each revision, including its nodeid (hash), the
251 251 nodeids of its parents, the position and offset of its data within
252 252 the data file, and the revision it's based on. Finally, each entry
253 253 contains a linkrev entry that can serve as a pointer to external
254 254 data.
255 255
256 256 The revision data itself is a linear collection of data chunks.
257 257 Each chunk represents a revision and is usually represented as a
258 258 delta against the previous chunk. To bound lookup time, runs of
259 259 deltas are limited to about 2 times the length of the original
260 260 version data. This makes retrieval of a version proportional to
261 261 its size, or O(1) relative to the number of revisions.
262 262
263 263 Both pieces of the revlog are written to in an append-only
264 264 fashion, which means we never need to rewrite a file to insert or
265 265 remove data, and can use some simple techniques to avoid the need
266 266 for locking while reading.
267 267
268 268 If checkambig, indexfile is opened with checkambig=True at
269 269 writing, to avoid file stat ambiguity.
270 270
271 271 If mmaplargeindex is True, and an mmapindexthreshold is set, the
272 272 index will be mmapped rather than read if it is larger than the
273 273 configured threshold.
274 274
275 275 If censorable is True, the revlog can have censored revisions.
276 276
277 277 If `upperboundcomp` is not None, this is the expected maximal gain from
278 278 compression for the data content.
279 279
280 280 `concurrencychecker` is an optional function that receives 3 arguments: a
281 281 file handle, a filename, and an expected position. It should check whether
282 282 the current position in the file handle is valid, and log/warn/fail (by
283 283 raising).
284 284 """
285 285
286 286 _flagserrorclass = error.RevlogError
287 287
288 288 def __init__(
289 289 self,
290 290 opener,
291 291 target,
292 292 radix,
293 293 postfix=None,
294 294 checkambig=False,
295 295 mmaplargeindex=False,
296 296 censorable=False,
297 297 upperboundcomp=None,
298 298 persistentnodemap=False,
299 299 concurrencychecker=None,
300 300 ):
301 301 """
302 302 create a revlog object
303 303
304 304 opener is a function that abstracts the file opening operation
305 305 and can be used to implement COW semantics or the like.
306 306
307 307 `target`: a (KIND, ID) tuple that identifies the content stored in
308 308 this revlog. It helps the rest of the code understand what the revlog
309 309 is about without having to resort to heuristics and index filename
310 310 analysis. Note that this must be reliably set by normal code, but
311 311 that test, debug, or performance measurement code might not set it to
312 312 an accurate value.
313 313 """
314 314 self.upperboundcomp = upperboundcomp
315 315
316 316 self.radix = radix
317 317
318 318 self._indexfile = None
319 319 self._datafile = None
320 320 self._nodemap_file = None
321 321 self.postfix = postfix
322 322 self.opener = opener
323 323 if persistentnodemap:
324 324 self._nodemap_file = nodemaputil.get_nodemap_file(self)
325 325
326 326 assert target[0] in ALL_KINDS
327 327 assert len(target) == 2
328 328 self.target = target
329 329 # When True, indexfile is opened with checkambig=True at writing, to
330 330 # avoid file stat ambiguity.
331 331 self._checkambig = checkambig
332 332 self._mmaplargeindex = mmaplargeindex
333 333 self._censorable = censorable
334 334 # 3-tuple of (node, rev, text) for a raw revision.
335 335 self._revisioncache = None
336 336 # Maps rev to chain base rev.
337 337 self._chainbasecache = util.lrucachedict(100)
338 338 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
339 339 self._chunkcache = (0, b'')
340 340 # How much data to read and cache into the raw revlog data cache.
341 341 self._chunkcachesize = 65536
342 342 self._maxchainlen = None
343 343 self._deltabothparents = True
344 344 self.index = None
345 345 self._nodemap_docket = None
346 346 # Mapping of partial identifiers to full nodes.
347 347 self._pcache = {}
348 348 # Mapping of revision integer to full node.
349 349 self._compengine = b'zlib'
350 350 self._compengineopts = {}
351 351 self._maxdeltachainspan = -1
352 352 self._withsparseread = False
353 353 self._sparserevlog = False
354 354 self._srdensitythreshold = 0.50
355 355 self._srmingapsize = 262144
356 356
357 357 # Make copy of flag processors so each revlog instance can support
358 358 # custom flags.
359 359 self._flagprocessors = dict(flagutil.flagprocessors)
360 360
361 361 # 2-tuple of file handles being used for active writing.
362 362 self._writinghandles = None
363 363 # prevent nesting of addgroup
364 364 self._adding_group = None
365 365
366 366 self._loadindex()
367 367
368 368 self._concurrencychecker = concurrencychecker
369 369
370 370 def _init_opts(self):
371 371 """process options (from above/config) to setup associated default revlog mode
372 372
373 373 These values might be affected when actually reading on-disk information.
374 374
375 375 The relevant values are returned for use in _loadindex().
376 376
377 377 * newversionflags:
378 378 version header to use if we need to create a new revlog
379 379
380 380 * mmapindexthreshold:
381 381 minimal index size at which to start using mmap
382 382
383 383 * force_nodemap:
384 384 force the usage of a "development" version of the nodemap code
385 385 """
386 386 mmapindexthreshold = None
387 387 opts = self.opener.options
388 388
389 389 if b'revlogv2' in opts:
390 390 new_header = REVLOGV2 | FLAG_INLINE_DATA
391 391 elif b'revlogv1' in opts:
392 392 new_header = REVLOGV1 | FLAG_INLINE_DATA
393 393 if b'generaldelta' in opts:
394 394 new_header |= FLAG_GENERALDELTA
395 395 elif b'revlogv0' in self.opener.options:
396 396 new_header = REVLOGV0
397 397 else:
398 398 new_header = REVLOG_DEFAULT_VERSION
399 399
400 400 if b'chunkcachesize' in opts:
401 401 self._chunkcachesize = opts[b'chunkcachesize']
402 402 if b'maxchainlen' in opts:
403 403 self._maxchainlen = opts[b'maxchainlen']
404 404 if b'deltabothparents' in opts:
405 405 self._deltabothparents = opts[b'deltabothparents']
406 406 self._lazydelta = bool(opts.get(b'lazydelta', True))
407 407 self._lazydeltabase = False
408 408 if self._lazydelta:
409 409 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
410 410 if b'compengine' in opts:
411 411 self._compengine = opts[b'compengine']
412 412 if b'zlib.level' in opts:
413 413 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
414 414 if b'zstd.level' in opts:
415 415 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
416 416 if b'maxdeltachainspan' in opts:
417 417 self._maxdeltachainspan = opts[b'maxdeltachainspan']
418 418 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
419 419 mmapindexthreshold = opts[b'mmapindexthreshold']
420 420 self.hassidedata = bool(opts.get(b'side-data', False))
421 421 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
422 422 withsparseread = bool(opts.get(b'with-sparse-read', False))
423 423 # sparse-revlog forces sparse-read
424 424 self._withsparseread = self._sparserevlog or withsparseread
425 425 if b'sparse-read-density-threshold' in opts:
426 426 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
427 427 if b'sparse-read-min-gap-size' in opts:
428 428 self._srmingapsize = opts[b'sparse-read-min-gap-size']
429 429 if opts.get(b'enableellipsis'):
430 430 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
431 431
432 432 # revlog v0 doesn't have flag processors
433 433 for flag, processor in pycompat.iteritems(
434 434 opts.get(b'flagprocessors', {})
435 435 ):
436 436 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
437 437
438 438 if self._chunkcachesize <= 0:
439 439 raise error.RevlogError(
440 440 _(b'revlog chunk cache size %r is not greater than 0')
441 441 % self._chunkcachesize
442 442 )
443 443 elif self._chunkcachesize & (self._chunkcachesize - 1):
444 444 raise error.RevlogError(
445 445 _(b'revlog chunk cache size %r is not a power of 2')
446 446 % self._chunkcachesize
447 447 )
448 448 force_nodemap = opts.get(b'devel-force-nodemap', False)
449 449 return new_header, mmapindexthreshold, force_nodemap
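# Illustrative sketch (editorial): self.opener.options is a plain dict of
# byte-string keys, so a hypothetical configuration such as
#
#   opts = {b'revlogv1': True, b'generaldelta': True,
#           b'sparse-revlog': True, b'chunkcachesize': 65536}
#
# yields new_header == REVLOGV1 | FLAG_INLINE_DATA | FLAG_GENERALDELTA,
# and sparse-revlog forces sparse-read on even without with-sparse-read.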
450 450
451 451 def _get_data(self, filepath, mmap_threshold):
452 452 """return a file content with or without mmap
453 453
454 454 If the file is missing return the empty string"""
455 455 try:
456 456 with self.opener(filepath) as fp:
457 457 if mmap_threshold is not None:
458 458 file_size = self.opener.fstat(fp).st_size
459 459 if file_size >= mmap_threshold:
460 460 # TODO: should .close() to release resources without
461 461 # relying on Python GC
462 462 return util.buffer(util.mmapread(fp))
463 463 return fp.read()
464 464 except IOError as inst:
465 465 if inst.errno != errno.ENOENT:
466 466 raise
467 467 return b''
468 468
469 469 def _loadindex(self):
470 470
471 471 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
472 472
473 473 if self.postfix is None:
474 474 entry_point = b'%s.i' % self.radix
475 475 else:
476 476 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
477 477
478 478 entry_data = b''
479 479 self._initempty = True
480 480 entry_data = self._get_data(entry_point, mmapindexthreshold)
481 481 if len(entry_data) > 0:
482 482 header = INDEX_HEADER.unpack(entry_data[:4])[0]
483 483 self._initempty = False
484 484 else:
485 485 header = new_header
486 486
487 487 self._format_flags = header & ~0xFFFF
488 488 self._format_version = header & 0xFFFF
489 489
490 490 if self._format_version == REVLOGV0:
491 491 if self._format_flags:
492 492 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
493 493 display_flag = self._format_flags >> 16
494 494 msg %= (display_flag, self._format_version, self.display_id)
495 495 raise error.RevlogError(msg)
496 496
497 497 self._inline = False
498 498 self._generaldelta = False
499 499
500 500 elif self._format_version == REVLOGV1:
501 501 if self._format_flags & ~REVLOGV1_FLAGS:
502 502 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
503 503 display_flag = self._format_flags >> 16
504 504 msg %= (display_flag, self._format_version, self.display_id)
505 505 raise error.RevlogError(msg)
506 506
507 507 self._inline = self._format_flags & FLAG_INLINE_DATA
508 508 self._generaldelta = self._format_flags & FLAG_GENERALDELTA
509 509
510 510 elif self._format_version == REVLOGV2:
511 511 if self._format_flags & ~REVLOGV2_FLAGS:
512 512 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
513 513 display_flag = self._format_flags >> 16
514 514 msg %= (display_flag, self._format_version, self.display_id)
515 515 raise error.RevlogError(msg)
516 516
517 517 # There is a bug in the transaction handling when going from an
518 518 # inline revlog to a separate index and data file. Turn it off until
519 519 # it's fixed, since v2 revlogs sometimes get rewritten on exchange.
520 520 # See issue6485
521 521 self._inline = False
522 522 # generaldelta implied by version 2 revlogs.
523 523 self._generaldelta = True
524 524
525 525 else:
526 526 msg = _(b'unknown version (%d) in revlog %s')
527 527 msg %= (self._format_version, self.display_id)
528 528 raise error.RevlogError(msg)
529 529
530 530 index_data = entry_data
531 531 self._indexfile = entry_point
532 532
533 533 if self.postfix is None or self.postfix == b'a':
534 534 self._datafile = b'%s.d' % self.radix
535 535 else:
536 536 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
537 537
538 538 self.nodeconstants = sha1nodeconstants
539 539 self.nullid = self.nodeconstants.nullid
540 540
541 541 # sparse-revlog can't be on without general-delta (issue6056)
542 542 if not self._generaldelta:
543 543 self._sparserevlog = False
544 544
545 545 self._storedeltachains = True
546 546
547 547 devel_nodemap = (
548 548 self._nodemap_file
549 549 and force_nodemap
550 550 and parse_index_v1_nodemap is not None
551 551 )
552 552
553 553 use_rust_index = False
554 554 if rustrevlog is not None:
555 555 if self._nodemap_file is not None:
556 556 use_rust_index = True
557 557 else:
558 558 use_rust_index = self.opener.options.get(b'rust.index')
559 559
560 560 self._parse_index = parse_index_v1
561 561 if self._format_version == REVLOGV0:
562 562 self._parse_index = revlogv0.parse_index_v0
563 563 elif self._format_version == REVLOGV2:
564 564 self._parse_index = parse_index_v2
565 565 elif devel_nodemap:
566 566 self._parse_index = parse_index_v1_nodemap
567 567 elif use_rust_index:
568 568 self._parse_index = parse_index_v1_mixed
569 569 try:
570 570 d = self._parse_index(index_data, self._inline)
571 571 index, _chunkcache = d
572 572 use_nodemap = (
573 573 not self._inline
574 574 and self._nodemap_file is not None
575 575 and util.safehasattr(index, 'update_nodemap_data')
576 576 )
577 577 if use_nodemap:
578 578 nodemap_data = nodemaputil.persisted_data(self)
579 579 if nodemap_data is not None:
580 580 docket = nodemap_data[0]
581 581 if (
582 582 len(d[0]) > docket.tip_rev
583 583 and d[0][docket.tip_rev][7] == docket.tip_node
584 584 ):
585 585 # no changelog tampering
586 586 self._nodemap_docket = docket
587 587 index.update_nodemap_data(*nodemap_data)
588 588 except (ValueError, IndexError):
589 589 raise error.RevlogError(
590 590 _(b"index %s is corrupted") % self.display_id
591 591 )
592 592 self.index, self._chunkcache = d
593 593 if not self._chunkcache:
594 594 self._chunkclear()
595 595 # revnum -> (chain-length, sum-delta-length)
596 596 self._chaininfocache = util.lrucachedict(500)
597 597 # revlog header -> revlog compressor
598 598 self._decompressors = {}
599 599
600 600 @util.propertycache
601 601 def revlog_kind(self):
602 602 return self.target[0]
603 603
604 604 @util.propertycache
605 605 def display_id(self):
606 606 """The public facing "ID" of the revlog that we use in message"""
607 607 # Maybe we should build a user facing representation of
608 608 # revlog.target instead of using `self.radix`
609 609 return self.radix
610 610
611 611 @util.propertycache
612 612 def _compressor(self):
613 613 engine = util.compengines[self._compengine]
614 614 return engine.revlogcompressor(self._compengineopts)
615 615
616 616 def _indexfp(self, mode=b'r'):
617 617 """file object for the revlog's index file"""
618 618 args = {'mode': mode}
619 619 if mode != b'r':
620 620 args['checkambig'] = self._checkambig
621 621 if mode == b'w':
622 622 args['atomictemp'] = True
623 623 return self.opener(self._indexfile, **args)
624 624
625 625 def _datafp(self, mode=b'r'):
626 626 """file object for the revlog's data file"""
627 627 return self.opener(self._datafile, mode=mode)
628 628
629 629 @contextlib.contextmanager
630 630 def _datareadfp(self, existingfp=None):
631 631 """file object suitable to read data"""
632 632 # Use explicit file handle, if given.
633 633 if existingfp is not None:
634 634 yield existingfp
635 635
636 636 # Use a file handle being actively used for writes, if available.
637 637 # There is some danger to doing this because reads will seek the
638 638 # file. However, _writeentry() performs a SEEK_END before all writes,
639 639 # so we should be safe.
640 640 elif self._writinghandles:
641 641 if self._inline:
642 642 yield self._writinghandles[0]
643 643 else:
644 644 yield self._writinghandles[1]
645 645
646 646 # Otherwise open a new file handle.
647 647 else:
648 648 if self._inline:
649 649 func = self._indexfp
650 650 else:
651 651 func = self._datafp
652 652 with func() as fp:
653 653 yield fp
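# Editorial usage sketch (mirroring the caller _readsegment below):
#
#   with self._datareadfp() as fp:
#       fp.seek(offset)
#       data = fp.read(length)
#
# Any active write handle is reused so reads see just-appended data.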
654 654
655 655 def tiprev(self):
656 656 return len(self.index) - 1
657 657
658 658 def tip(self):
659 659 return self.node(self.tiprev())
660 660
661 661 def __contains__(self, rev):
662 662 return 0 <= rev < len(self)
663 663
664 664 def __len__(self):
665 665 return len(self.index)
666 666
667 667 def __iter__(self):
668 668 return iter(pycompat.xrange(len(self)))
669 669
670 670 def revs(self, start=0, stop=None):
671 671 """iterate over all rev in this revlog (from start to stop)"""
672 672 return storageutil.iterrevs(len(self), start=start, stop=stop)
673 673
674 674 @property
675 675 def nodemap(self):
676 676 msg = (
677 677 b"revlog.nodemap is deprecated, "
678 678 b"use revlog.index.[has_node|rev|get_rev]"
679 679 )
680 680 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
681 681 return self.index.nodemap
682 682
683 683 @property
684 684 def _nodecache(self):
685 685 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
686 686 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
687 687 return self.index.nodemap
688 688
689 689 def hasnode(self, node):
690 690 try:
691 691 self.rev(node)
692 692 return True
693 693 except KeyError:
694 694 return False
695 695
696 696 def candelta(self, baserev, rev):
697 697 """whether two revisions (baserev, rev) can be delta-ed or not"""
698 698 # Disable delta if either rev requires a content-changing flag
699 699 # processor (ex. LFS). This is because such flag processor can alter
700 700 # the rawtext content that the delta will be based on, and two clients
701 701 # could have a same revlog node with different flags (i.e. different
702 702 # rawtext contents) and the delta could be incompatible.
703 703 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
704 704 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
705 705 ):
706 706 return False
707 707 return True
708 708
709 709 def update_caches(self, transaction):
710 710 if self._nodemap_file is not None:
711 711 if transaction is None:
712 712 nodemaputil.update_persistent_nodemap(self)
713 713 else:
714 714 nodemaputil.setup_persistent_nodemap(transaction, self)
715 715
716 716 def clearcaches(self):
717 717 self._revisioncache = None
718 718 self._chainbasecache.clear()
719 719 self._chunkcache = (0, b'')
720 720 self._pcache = {}
721 721 self._nodemap_docket = None
722 722 self.index.clearcaches()
723 723 # The python code is the one responsible for validating the docket; we
724 724 # end up having to refresh it here.
725 725 use_nodemap = (
726 726 not self._inline
727 727 and self._nodemap_file is not None
728 728 and util.safehasattr(self.index, 'update_nodemap_data')
729 729 )
730 730 if use_nodemap:
731 731 nodemap_data = nodemaputil.persisted_data(self)
732 732 if nodemap_data is not None:
733 733 self._nodemap_docket = nodemap_data[0]
734 734 self.index.update_nodemap_data(*nodemap_data)
735 735
736 736 def rev(self, node):
737 737 try:
738 738 return self.index.rev(node)
739 739 except TypeError:
740 740 raise
741 741 except error.RevlogError:
742 742 # parsers.c radix tree lookup failed
743 743 if (
744 744 node == self.nodeconstants.wdirid
745 745 or node in self.nodeconstants.wdirfilenodeids
746 746 ):
747 747 raise error.WdirUnsupported
748 748 raise error.LookupError(node, self.display_id, _(b'no node'))
749 749
750 750 # Accessors for index entries.
751 751
752 752 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
753 753 # are flags.
754 754 def start(self, rev):
755 755 return int(self.index[rev][0] >> 16)
756 756
757 757 def flags(self, rev):
758 758 return self.index[rev][0] & 0xFFFF
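# Editorial note: start() and flags() unpack the same packed field; for
# v = self.index[rev][0], the byte offset is v >> 16 and the flags are
# v & 0xFFFF, mirroring offset_type() above.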
759 759
760 760 def length(self, rev):
761 761 return self.index[rev][1]
762 762
763 763 def sidedata_length(self, rev):
764 764 if not self.hassidedata:
765 765 return 0
766 766 return self.index[rev][9]
767 767
768 768 def rawsize(self, rev):
769 769 """return the length of the uncompressed text for a given revision"""
770 770 l = self.index[rev][2]
771 771 if l >= 0:
772 772 return l
773 773
774 774 t = self.rawdata(rev)
775 775 return len(t)
776 776
777 777 def size(self, rev):
778 778 """length of non-raw text (processed by a "read" flag processor)"""
779 779 # fast path: if no "read" flag processor could change the content,
780 780 # size is rawsize. note: ELLIPSIS is known to not change the content.
781 781 flags = self.flags(rev)
782 782 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
783 783 return self.rawsize(rev)
784 784
785 785 return len(self.revision(rev, raw=False))
786 786
787 787 def chainbase(self, rev):
788 788 base = self._chainbasecache.get(rev)
789 789 if base is not None:
790 790 return base
791 791
792 792 index = self.index
793 793 iterrev = rev
794 794 base = index[iterrev][3]
795 795 while base != iterrev:
796 796 iterrev = base
797 797 base = index[iterrev][3]
798 798
799 799 self._chainbasecache[rev] = base
800 800 return base
801 801
802 802 def linkrev(self, rev):
803 803 return self.index[rev][4]
804 804
805 805 def parentrevs(self, rev):
806 806 try:
807 807 entry = self.index[rev]
808 808 except IndexError:
809 809 if rev == wdirrev:
810 810 raise error.WdirUnsupported
811 811 raise
812 812 if entry[5] == nullrev:
813 813 return entry[6], entry[5]
814 814 else:
815 815 return entry[5], entry[6]
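# Editorial note: the swap above normalizes entries so that a null first
# parent is reported second; stored parents (nullrev, 5) come back as
# (5, nullrev).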
816 816
817 817 # fast parentrevs(rev) where rev isn't filtered
818 818 _uncheckedparentrevs = parentrevs
819 819
820 820 def node(self, rev):
821 821 try:
822 822 return self.index[rev][7]
823 823 except IndexError:
824 824 if rev == wdirrev:
825 825 raise error.WdirUnsupported
826 826 raise
827 827
828 828 # Derived from index values.
829 829
830 830 def end(self, rev):
831 831 return self.start(rev) + self.length(rev)
832 832
833 833 def parents(self, node):
834 834 i = self.index
835 835 d = i[self.rev(node)]
836 836 # inline node() to avoid function call overhead
837 837 if d[5] == nullrev:
838 838 return i[d[6]][7], i[d[5]][7]
839 839 else:
840 840 return i[d[5]][7], i[d[6]][7]
841 841
842 842 def chainlen(self, rev):
843 843 return self._chaininfo(rev)[0]
844 844
845 845 def _chaininfo(self, rev):
846 846 chaininfocache = self._chaininfocache
847 847 if rev in chaininfocache:
848 848 return chaininfocache[rev]
849 849 index = self.index
850 850 generaldelta = self._generaldelta
851 851 iterrev = rev
852 852 e = index[iterrev]
853 853 clen = 0
854 854 compresseddeltalen = 0
855 855 while iterrev != e[3]:
856 856 clen += 1
857 857 compresseddeltalen += e[1]
858 858 if generaldelta:
859 859 iterrev = e[3]
860 860 else:
861 861 iterrev -= 1
862 862 if iterrev in chaininfocache:
863 863 t = chaininfocache[iterrev]
864 864 clen += t[0]
865 865 compresseddeltalen += t[1]
866 866 break
867 867 e = index[iterrev]
868 868 else:
869 869 # Add text length of base since decompressing that also takes
870 870 # work. For cache hits the length is already included.
871 871 compresseddeltalen += e[1]
872 872 r = (clen, compresseddeltalen)
873 873 chaininfocache[rev] = r
874 874 return r
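# Illustrative sketch (editorial): for a generaldelta revlog where rev 4
# deltas against rev 3 and rev 3 against the snapshot at rev 1,
# _chaininfo(4) returns (2, d4 + d3 + d1) with d_r = index[r][1]: two
# deltas in the chain, plus the base text's compressed length.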
875 875
876 876 def _deltachain(self, rev, stoprev=None):
877 877 """Obtain the delta chain for a revision.
878 878
879 879 ``stoprev`` specifies a revision to stop at. If not specified, we
880 880 stop at the base of the chain.
881 881
882 882 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
883 883 revs in ascending order and ``stopped`` is a bool indicating whether
884 884 ``stoprev`` was hit.
885 885 """
886 886 # Try C implementation.
887 887 try:
888 888 return self.index.deltachain(rev, stoprev, self._generaldelta)
889 889 except AttributeError:
890 890 pass
891 891
892 892 chain = []
893 893
894 894 # Alias to prevent attribute lookup in tight loop.
895 895 index = self.index
896 896 generaldelta = self._generaldelta
897 897
898 898 iterrev = rev
899 899 e = index[iterrev]
900 900 while iterrev != e[3] and iterrev != stoprev:
901 901 chain.append(iterrev)
902 902 if generaldelta:
903 903 iterrev = e[3]
904 904 else:
905 905 iterrev -= 1
906 906 e = index[iterrev]
907 907
908 908 if iterrev == stoprev:
909 909 stopped = True
910 910 else:
911 911 chain.append(iterrev)
912 912 stopped = False
913 913
914 914 chain.reverse()
915 915 return chain, stopped
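# Illustrative sketch (editorial), same chain 1 <- 3 <- 4 as above:
# _deltachain(4) returns ([1, 3, 4], False), while _deltachain(4,
# stoprev=3) stops early and returns ([4], True).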
916 916
917 917 def ancestors(self, revs, stoprev=0, inclusive=False):
918 918 """Generate the ancestors of 'revs' in reverse revision order.
919 919 Does not generate revs lower than stoprev.
920 920
921 921 See the documentation for ancestor.lazyancestors for more details."""
922 922
923 923 # first, make sure start revisions aren't filtered
924 924 revs = list(revs)
925 925 checkrev = self.node
926 926 for r in revs:
927 927 checkrev(r)
928 928 # and we're sure ancestors aren't filtered as well
929 929
930 930 if rustancestor is not None:
931 931 lazyancestors = rustancestor.LazyAncestors
932 932 arg = self.index
933 933 else:
934 934 lazyancestors = ancestor.lazyancestors
935 935 arg = self._uncheckedparentrevs
936 936 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
937 937
938 938 def descendants(self, revs):
939 939 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
940 940
941 941 def findcommonmissing(self, common=None, heads=None):
942 942 """Return a tuple of the ancestors of common and the ancestors of heads
943 943 that are not ancestors of common. In revset terminology, we return the
944 944 tuple:
945 945
946 946 ::common, (::heads) - (::common)
947 947
948 948 The list is sorted by revision number, meaning it is
949 949 topologically sorted.
950 950
951 951 'heads' and 'common' are both lists of node IDs. If heads is
952 952 not supplied, uses all of the revlog's heads. If common is not
953 953 supplied, uses nullid."""
954 954 if common is None:
955 955 common = [self.nullid]
956 956 if heads is None:
957 957 heads = self.heads()
958 958
959 959 common = [self.rev(n) for n in common]
960 960 heads = [self.rev(n) for n in heads]
961 961
962 962 # we want the ancestors, but inclusive
963 963 class lazyset(object):
964 964 def __init__(self, lazyvalues):
965 965 self.addedvalues = set()
966 966 self.lazyvalues = lazyvalues
967 967
968 968 def __contains__(self, value):
969 969 return value in self.addedvalues or value in self.lazyvalues
970 970
971 971 def __iter__(self):
972 972 added = self.addedvalues
973 973 for r in added:
974 974 yield r
975 975 for r in self.lazyvalues:
976 976 if not r in added:
977 977 yield r
978 978
979 979 def add(self, value):
980 980 self.addedvalues.add(value)
981 981
982 982 def update(self, values):
983 983 self.addedvalues.update(values)
984 984
985 985 has = lazyset(self.ancestors(common))
986 986 has.add(nullrev)
987 987 has.update(common)
988 988
989 989 # take all ancestors from heads that aren't in has
990 990 missing = set()
991 991 visit = collections.deque(r for r in heads if r not in has)
992 992 while visit:
993 993 r = visit.popleft()
994 994 if r in missing:
995 995 continue
996 996 else:
997 997 missing.add(r)
998 998 for p in self.parentrevs(r):
999 999 if p not in has:
1000 1000 visit.append(p)
1001 1001 missing = list(missing)
1002 1002 missing.sort()
1003 1003 return has, [self.node(miss) for miss in missing]
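# Illustrative sketch (editorial): in a linear revlog 0..4 with
# common=[node(2)] and heads=[node(4)], the lazy "has" set covers revs
# {0, 1, 2} and the missing list is [node(3), node(4)].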
1004 1004
1005 1005 def incrementalmissingrevs(self, common=None):
1006 1006 """Return an object that can be used to incrementally compute the
1007 1007 revision numbers of the ancestors of arbitrary sets that are not
1008 1008 ancestors of common. This is an ancestor.incrementalmissingancestors
1009 1009 object.
1010 1010
1011 1011 'common' is a list of revision numbers. If common is not supplied, uses
1012 1012 nullrev.
1013 1013 """
1014 1014 if common is None:
1015 1015 common = [nullrev]
1016 1016
1017 1017 if rustancestor is not None:
1018 1018 return rustancestor.MissingAncestors(self.index, common)
1019 1019 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1020 1020
1021 1021 def findmissingrevs(self, common=None, heads=None):
1022 1022 """Return the revision numbers of the ancestors of heads that
1023 1023 are not ancestors of common.
1024 1024
1025 1025 More specifically, return a list of revision numbers corresponding to
1026 1026 nodes N such that every N satisfies the following constraints:
1027 1027
1028 1028 1. N is an ancestor of some node in 'heads'
1029 1029 2. N is not an ancestor of any node in 'common'
1030 1030
1031 1031 The list is sorted by revision number, meaning it is
1032 1032 topologically sorted.
1033 1033
1034 1034 'heads' and 'common' are both lists of revision numbers. If heads is
1035 1035 not supplied, uses all of the revlog's heads. If common is not
1036 1036 supplied, uses nullid."""
1037 1037 if common is None:
1038 1038 common = [nullrev]
1039 1039 if heads is None:
1040 1040 heads = self.headrevs()
1041 1041
1042 1042 inc = self.incrementalmissingrevs(common=common)
1043 1043 return inc.missingancestors(heads)
1044 1044
1045 1045 def findmissing(self, common=None, heads=None):
1046 1046 """Return the ancestors of heads that are not ancestors of common.
1047 1047
1048 1048 More specifically, return a list of nodes N such that every N
1049 1049 satisfies the following constraints:
1050 1050
1051 1051 1. N is an ancestor of some node in 'heads'
1052 1052 2. N is not an ancestor of any node in 'common'
1053 1053
1054 1054 The list is sorted by revision number, meaning it is
1055 1055 topologically sorted.
1056 1056
1057 1057 'heads' and 'common' are both lists of node IDs. If heads is
1058 1058 not supplied, uses all of the revlog's heads. If common is not
1059 1059 supplied, uses nullid."""
1060 1060 if common is None:
1061 1061 common = [self.nullid]
1062 1062 if heads is None:
1063 1063 heads = self.heads()
1064 1064
1065 1065 common = [self.rev(n) for n in common]
1066 1066 heads = [self.rev(n) for n in heads]
1067 1067
1068 1068 inc = self.incrementalmissingrevs(common=common)
1069 1069 return [self.node(r) for r in inc.missingancestors(heads)]
1070 1070
1071 1071 def nodesbetween(self, roots=None, heads=None):
1072 1072 """Return a topological path from 'roots' to 'heads'.
1073 1073
1074 1074 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1075 1075 topologically sorted list of all nodes N that satisfy both of
1076 1076 these constraints:
1077 1077
1078 1078 1. N is a descendant of some node in 'roots'
1079 1079 2. N is an ancestor of some node in 'heads'
1080 1080
1081 1081 Every node is considered to be both a descendant and an ancestor
1082 1082 of itself, so every reachable node in 'roots' and 'heads' will be
1083 1083 included in 'nodes'.
1084 1084
1085 1085 'outroots' is the list of reachable nodes in 'roots', i.e., the
1086 1086 subset of 'roots' that is returned in 'nodes'. Likewise,
1087 1087 'outheads' is the subset of 'heads' that is also in 'nodes'.
1088 1088
1089 1089 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1090 1090 unspecified, uses nullid as the only root. If 'heads' is
1091 1091 unspecified, uses list of all of the revlog's heads."""
1092 1092 nonodes = ([], [], [])
1093 1093 if roots is not None:
1094 1094 roots = list(roots)
1095 1095 if not roots:
1096 1096 return nonodes
1097 1097 lowestrev = min([self.rev(n) for n in roots])
1098 1098 else:
1099 1099 roots = [self.nullid] # Everybody's a descendant of nullid
1100 1100 lowestrev = nullrev
1101 1101 if (lowestrev == nullrev) and (heads is None):
1102 1102 # We want _all_ the nodes!
1103 1103 return (
1104 1104 [self.node(r) for r in self],
1105 1105 [self.nullid],
1106 1106 list(self.heads()),
1107 1107 )
1108 1108 if heads is None:
1109 1109 # All nodes are ancestors, so the latest ancestor is the last
1110 1110 # node.
1111 1111 highestrev = len(self) - 1
1112 1112 # Set ancestors to None to signal that every node is an ancestor.
1113 1113 ancestors = None
1114 1114 # Set heads to an empty dictionary for later discovery of heads
1115 1115 heads = {}
1116 1116 else:
1117 1117 heads = list(heads)
1118 1118 if not heads:
1119 1119 return nonodes
1120 1120 ancestors = set()
1121 1121 # Turn heads into a dictionary so we can remove 'fake' heads.
1122 1122 # Also, later we will be using it to filter out the heads we can't
1123 1123 # find from roots.
1124 1124 heads = dict.fromkeys(heads, False)
1125 1125 # Start at the top and keep marking parents until we're done.
1126 1126 nodestotag = set(heads)
1127 1127 # Remember where the top was so we can use it as a limit later.
1128 1128 highestrev = max([self.rev(n) for n in nodestotag])
1129 1129 while nodestotag:
1130 1130 # grab a node to tag
1131 1131 n = nodestotag.pop()
1132 1132 # Never tag nullid
1133 1133 if n == self.nullid:
1134 1134 continue
1135 1135 # A node's revision number represents its place in a
1136 1136 # topologically sorted list of nodes.
1137 1137 r = self.rev(n)
1138 1138 if r >= lowestrev:
1139 1139 if n not in ancestors:
1140 1140 # If we are possibly a descendant of one of the roots
1141 1141 # and we haven't already been marked as an ancestor
1142 1142 ancestors.add(n) # Mark as ancestor
1143 1143 # Add non-nullid parents to list of nodes to tag.
1144 1144 nodestotag.update(
1145 1145 [p for p in self.parents(n) if p != self.nullid]
1146 1146 )
1147 1147 elif n in heads: # We've seen it before, is it a fake head?
1148 1148 # So it is, real heads should not be the ancestors of
1149 1149 # any other heads.
1150 1150 heads.pop(n)
1151 1151 if not ancestors:
1152 1152 return nonodes
1153 1153 # Now that we have our set of ancestors, we want to remove any
1154 1154 # roots that are not ancestors.
1155 1155
1156 1156 # If one of the roots was nullid, everything is included anyway.
1157 1157 if lowestrev > nullrev:
1158 1158 # But, since we weren't, let's recompute the lowest rev to not
1159 1159 # include roots that aren't ancestors.
1160 1160
1161 1161 # Filter out roots that aren't ancestors of heads
1162 1162 roots = [root for root in roots if root in ancestors]
1163 1163 # Recompute the lowest revision
1164 1164 if roots:
1165 1165 lowestrev = min([self.rev(root) for root in roots])
1166 1166 else:
1167 1167 # No more roots? Return empty list
1168 1168 return nonodes
1169 1169 else:
1170 1170 # We are descending from nullid, and don't need to care about
1171 1171 # any other roots.
1172 1172 lowestrev = nullrev
1173 1173 roots = [self.nullid]
1174 1174 # Transform our roots list into a set.
1175 1175 descendants = set(roots)
1176 1176 # Also, keep the original roots so we can filter out roots that aren't
1177 1177 # 'real' roots (i.e. are descended from other roots).
1178 1178 roots = descendants.copy()
1179 1179 # Our topologically sorted list of output nodes.
1180 1180 orderedout = []
1181 1181 # Don't start at nullid since we don't want nullid in our output list,
1182 1182 # and if nullid shows up in descendants, empty parents will look like
1183 1183 # they're descendants.
1184 1184 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1185 1185 n = self.node(r)
1186 1186 isdescendant = False
1187 1187 if lowestrev == nullrev: # Everybody is a descendant of nullid
1188 1188 isdescendant = True
1189 1189 elif n in descendants:
1190 1190 # n is already a descendant
1191 1191 isdescendant = True
1192 1192 # This check only needs to be done here because all the roots
1193 1193 # will start being marked as descendants before the loop.
1194 1194 if n in roots:
1195 1195 # If n was a root, check if it's a 'real' root.
1196 1196 p = tuple(self.parents(n))
1197 1197 # If any of its parents are descendants, it's not a root.
1198 1198 if (p[0] in descendants) or (p[1] in descendants):
1199 1199 roots.remove(n)
1200 1200 else:
1201 1201 p = tuple(self.parents(n))
1202 1202 # A node is a descendant if either of its parents is a
1203 1203 # descendant. (We seeded the descendants set with the roots
1204 1204 # up there, remember?)
1205 1205 if (p[0] in descendants) or (p[1] in descendants):
1206 1206 descendants.add(n)
1207 1207 isdescendant = True
1208 1208 if isdescendant and ((ancestors is None) or (n in ancestors)):
1209 1209 # Only include nodes that are both descendants and ancestors.
1210 1210 orderedout.append(n)
1211 1211 if (ancestors is not None) and (n in heads):
1212 1212 # We're trying to figure out which heads are reachable
1213 1213 # from roots.
1214 1214 # Mark this head as having been reached
1215 1215 heads[n] = True
1216 1216 elif ancestors is None:
1217 1217 # Otherwise, we're trying to discover the heads.
1218 1218 # Assume this is a head because if it isn't, the next step
1219 1219 # will eventually remove it.
1220 1220 heads[n] = True
1221 1221 # But, obviously its parents aren't.
1222 1222 for p in self.parents(n):
1223 1223 heads.pop(p, None)
1224 1224 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1225 1225 roots = list(roots)
1226 1226 assert orderedout
1227 1227 assert roots
1228 1228 assert heads
1229 1229 return (orderedout, roots, heads)
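# Illustrative sketch (editorial): in a linear revlog 0..3,
# nodesbetween(roots=[node(1)], heads=[node(2)]) returns
# ([node(1), node(2)], [node(1)], [node(2)]): the path, the reachable
# roots, and the reached heads.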
1230 1230
1231 1231 def headrevs(self, revs=None):
1232 1232 if revs is None:
1233 1233 try:
1234 1234 return self.index.headrevs()
1235 1235 except AttributeError:
1236 1236 return self._headrevs()
1237 1237 if rustdagop is not None:
1238 1238 return rustdagop.headrevs(self.index, revs)
1239 1239 return dagop.headrevs(revs, self._uncheckedparentrevs)
1240 1240
1241 1241 def computephases(self, roots):
1242 1242 return self.index.computephasesmapsets(roots)
1243 1243
1244 1244 def _headrevs(self):
1245 1245 count = len(self)
1246 1246 if not count:
1247 1247 return [nullrev]
1248 1248 # we won't iterate over filtered revs, so nobody is a head at start
1249 1249 ishead = [0] * (count + 1)
1250 1250 index = self.index
1251 1251 for r in self:
1252 1252 ishead[r] = 1 # I may be a head
1253 1253 e = index[r]
1254 1254 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1255 1255 return [r for r, val in enumerate(ishead) if val]
1256 1256
1257 1257 def heads(self, start=None, stop=None):
1258 1258 """return the list of all nodes that have no children
1259 1259
1260 1260 if start is specified, only heads that are descendants of
1261 1261 start will be returned
1262 1262 if stop is specified, it will consider all the revs from stop
1263 1263 as if they had no children
1264 1264 """
1265 1265 if start is None and stop is None:
1266 1266 if not len(self):
1267 1267 return [self.nullid]
1268 1268 return [self.node(r) for r in self.headrevs()]
1269 1269
1270 1270 if start is None:
1271 1271 start = nullrev
1272 1272 else:
1273 1273 start = self.rev(start)
1274 1274
1275 1275 stoprevs = {self.rev(n) for n in stop or []}
1276 1276
1277 1277 revs = dagop.headrevssubset(
1278 1278 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1279 1279 )
1280 1280
1281 1281 return [self.node(rev) for rev in revs]
1282 1282
1283 1283 def children(self, node):
1284 1284 """find the children of a given node"""
1285 1285 c = []
1286 1286 p = self.rev(node)
1287 1287 for r in self.revs(start=p + 1):
1288 1288 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1289 1289 if prevs:
1290 1290 for pr in prevs:
1291 1291 if pr == p:
1292 1292 c.append(self.node(r))
1293 1293 elif p == nullrev:
1294 1294 c.append(self.node(r))
1295 1295 return c
1296 1296
1297 1297 def commonancestorsheads(self, a, b):
1298 1298 """calculate all the heads of the common ancestors of nodes a and b"""
1299 1299 a, b = self.rev(a), self.rev(b)
1300 1300 ancs = self._commonancestorsheads(a, b)
1301 1301 return pycompat.maplist(self.node, ancs)
1302 1302
1303 1303 def _commonancestorsheads(self, *revs):
1304 1304 """calculate all the heads of the common ancestors of revs"""
1305 1305 try:
1306 1306 ancs = self.index.commonancestorsheads(*revs)
1307 1307 except (AttributeError, OverflowError): # C implementation failed
1308 1308 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1309 1309 return ancs
1310 1310
1311 1311 def isancestor(self, a, b):
1312 1312 """return True if node a is an ancestor of node b
1313 1313
1314 1314 A revision is considered an ancestor of itself."""
1315 1315 a, b = self.rev(a), self.rev(b)
1316 1316 return self.isancestorrev(a, b)
1317 1317
1318 1318 def isancestorrev(self, a, b):
1319 1319 """return True if revision a is an ancestor of revision b
1320 1320
1321 1321 A revision is considered an ancestor of itself.
1322 1322
1323 1323 The implementation of this is trivial but the use of
1324 1324 reachableroots is not."""
1325 1325 if a == nullrev:
1326 1326 return True
1327 1327 elif a == b:
1328 1328 return True
1329 1329 elif a > b:
1330 1330 return False
1331 1331 return bool(self.reachableroots(a, [b], [a], includepath=False))
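# Editorial note: reachableroots(a, heads=[b], roots=[a]) asks whether
# any part of a:: lies within ::b; the result is non-empty exactly when
# a is an ancestor of b, which is all isancestorrev() needs.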
1332 1332
1333 1333 def reachableroots(self, minroot, heads, roots, includepath=False):
1334 1334 """return (heads(::(<roots> and <roots>::<heads>)))
1335 1335
1336 1336 If includepath is True, return (<roots>::<heads>)."""
1337 1337 try:
1338 1338 return self.index.reachableroots2(
1339 1339 minroot, heads, roots, includepath
1340 1340 )
1341 1341 except AttributeError:
1342 1342 return dagop._reachablerootspure(
1343 1343 self.parentrevs, minroot, roots, heads, includepath
1344 1344 )
1345 1345
1346 1346 def ancestor(self, a, b):
1347 1347 """calculate the "best" common ancestor of nodes a and b"""
1348 1348
1349 1349 a, b = self.rev(a), self.rev(b)
1350 1350 try:
1351 1351 ancs = self.index.ancestors(a, b)
1352 1352 except (AttributeError, OverflowError):
1353 1353 ancs = ancestor.ancestors(self.parentrevs, a, b)
1354 1354 if ancs:
1355 1355 # choose a consistent winner when there's a tie
1356 1356 return min(map(self.node, ancs))
1357 1357 return self.nullid
1358 1358
1359 1359 def _match(self, id):
1360 1360 if isinstance(id, int):
1361 1361 # rev
1362 1362 return self.node(id)
1363 1363 if len(id) == self.nodeconstants.nodelen:
1364 1364 # possibly a binary node
1365 1365 # odds of a binary node being all hex in ASCII are 1 in 10**25
1366 1366 try:
1367 1367 node = id
1368 1368 self.rev(node) # quick search the index
1369 1369 return node
1370 1370 except error.LookupError:
1371 1371 pass # may be partial hex id
1372 1372 try:
1373 1373 # str(rev)
1374 1374 rev = int(id)
1375 1375 if b"%d" % rev != id:
1376 1376 raise ValueError
1377 1377 if rev < 0:
1378 1378 rev = len(self) + rev
1379 1379 if rev < 0 or rev >= len(self):
1380 1380 raise ValueError
1381 1381 return self.node(rev)
1382 1382 except (ValueError, OverflowError):
1383 1383 pass
1384 1384 if len(id) == 2 * self.nodeconstants.nodelen:
1385 1385 try:
1386 1386 # a full hex nodeid?
1387 1387 node = bin(id)
1388 1388 self.rev(node)
1389 1389 return node
1390 1390 except (TypeError, error.LookupError):
1391 1391 pass
1392 1392
1393 1393 def _partialmatch(self, id):
1394 1394 # we don't care about wdirfilenodeids as they should always be full hashes
1395 1395 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1396 1396 try:
1397 1397 partial = self.index.partialmatch(id)
1398 1398 if partial and self.hasnode(partial):
1399 1399 if maybewdir:
1400 1400 # single 'ff...' match in radix tree, ambiguous with wdir
1401 1401 raise error.RevlogError
1402 1402 return partial
1403 1403 if maybewdir:
1404 1404 # no 'ff...' match in radix tree, wdir identified
1405 1405 raise error.WdirUnsupported
1406 1406 return None
1407 1407 except error.RevlogError:
1408 1408 # parsers.c radix tree lookup gave multiple matches
1409 1409 # fast path: for unfiltered changelog, radix tree is accurate
1410 1410 if not getattr(self, 'filteredrevs', None):
1411 1411 raise error.AmbiguousPrefixLookupError(
1412 1412 id, self.display_id, _(b'ambiguous identifier')
1413 1413 )
1414 1414 # fall through to slow path that filters hidden revisions
1415 1415 except (AttributeError, ValueError):
1416 1416 # we are pure python, or key was too short to search radix tree
1417 1417 pass
1418 1418
1419 1419 if id in self._pcache:
1420 1420 return self._pcache[id]
1421 1421
1422 1422 if len(id) <= 40:
1423 1423 try:
1424 1424 # hex(node)[:...]
1425 1425 l = len(id) // 2 # grab an even number of digits
1426 1426 prefix = bin(id[: l * 2])
1427 1427 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1428 1428 nl = [
1429 1429 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1430 1430 ]
1431 1431 if self.nodeconstants.nullhex.startswith(id):
1432 1432 nl.append(self.nullid)
1433 1433 if len(nl) > 0:
1434 1434 if len(nl) == 1 and not maybewdir:
1435 1435 self._pcache[id] = nl[0]
1436 1436 return nl[0]
1437 1437 raise error.AmbiguousPrefixLookupError(
1438 1438 id, self.display_id, _(b'ambiguous identifier')
1439 1439 )
1440 1440 if maybewdir:
1441 1441 raise error.WdirUnsupported
1442 1442 return None
1443 1443 except TypeError:
1444 1444 pass
1445 1445
1446 1446 def lookup(self, id):
1447 1447 """locate a node based on:
1448 1448 - revision number or str(revision number)
1449 1449 - nodeid or subset of hex nodeid
1450 1450 """
1451 1451 n = self._match(id)
1452 1452 if n is not None:
1453 1453 return n
1454 1454 n = self._partialmatch(id)
1455 1455 if n:
1456 1456 return n
1457 1457
1458 1458 raise error.LookupError(id, self.display_id, _(b'no match found'))
1459 1459
1460 1460 def shortest(self, node, minlength=1):
1461 1461 """Find the shortest unambiguous prefix that matches node."""
1462 1462
1463 1463 def isvalid(prefix):
1464 1464 try:
1465 1465 matchednode = self._partialmatch(prefix)
1466 1466 except error.AmbiguousPrefixLookupError:
1467 1467 return False
1468 1468 except error.WdirUnsupported:
1469 1469 # single 'ff...' match
1470 1470 return True
1471 1471 if matchednode is None:
1472 1472 raise error.LookupError(node, self.display_id, _(b'no node'))
1473 1473 return True
1474 1474
1475 1475 def maybewdir(prefix):
1476 1476 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1477 1477
1478 1478 hexnode = hex(node)
1479 1479
1480 1480 def disambiguate(hexnode, minlength):
1481 1481 """Disambiguate against wdirid."""
1482 1482 for length in range(minlength, len(hexnode) + 1):
1483 1483 prefix = hexnode[:length]
1484 1484 if not maybewdir(prefix):
1485 1485 return prefix
1486 1486
1487 1487 if not getattr(self, 'filteredrevs', None):
1488 1488 try:
1489 1489 length = max(self.index.shortest(node), minlength)
1490 1490 return disambiguate(hexnode, length)
1491 1491 except error.RevlogError:
1492 1492 if node != self.nodeconstants.wdirid:
1493 1493 raise error.LookupError(
1494 1494 node, self.display_id, _(b'no node')
1495 1495 )
1496 1496 except AttributeError:
1497 1497 # Fall through to pure code
1498 1498 pass
1499 1499
1500 1500 if node == self.nodeconstants.wdirid:
1501 1501 for length in range(minlength, len(hexnode) + 1):
1502 1502 prefix = hexnode[:length]
1503 1503 if isvalid(prefix):
1504 1504 return prefix
1505 1505
1506 1506 for length in range(minlength, len(hexnode) + 1):
1507 1507 prefix = hexnode[:length]
1508 1508 if isvalid(prefix):
1509 1509 return disambiguate(hexnode, length)
1510 1510
1511 1511 def cmp(self, node, text):
1512 1512 """compare text with a given file revision
1513 1513
1514 1514 returns True if text is different than what is stored.
1515 1515 """
1516 1516 p1, p2 = self.parents(node)
1517 1517 return storageutil.hashrevisionsha1(text, p1, p2) != node
1518 1518
1519 1519 def _cachesegment(self, offset, data):
1520 1520 """Add a segment to the revlog cache.
1521 1521
1522 1522 Accepts an absolute offset and the data that is at that location.
1523 1523 """
1524 1524 o, d = self._chunkcache
1525 1525 # try to add to existing cache
1526 1526 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1527 1527 self._chunkcache = o, d + data
1528 1528 else:
1529 1529 self._chunkcache = offset, data
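# Illustrative sketch (editorial): adjacent segments coalesce, so
# _cachesegment(0, b'abc') followed by _cachesegment(3, b'def') leaves
# self._chunkcache == (0, b'abcdef'); a non-adjacent (or oversized)
# write simply replaces the cache.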
1530 1530
1531 1531 def _readsegment(self, offset, length, df=None):
1532 1532 """Load a segment of raw data from the revlog.
1533 1533
1534 1534 Accepts an absolute offset, length to read, and an optional existing
1535 1535 file handle to read from.
1536 1536
1537 1537 If an existing file handle is passed, it will be seeked and the
1538 1538 original seek position will NOT be restored.
1539 1539
1540 1540 Returns a str or buffer of raw byte data.
1541 1541
1542 1542 Raises if the requested number of bytes could not be read.
1543 1543 """
1544 1544 # Cache data both forward and backward around the requested
1545 1545 # data, in a fixed size window. This helps speed up operations
1546 1546 # involving reading the revlog backwards.
1547 1547 cachesize = self._chunkcachesize
1548 1548 realoffset = offset & ~(cachesize - 1)
1549 1549 reallength = (
1550 1550 (offset + length + cachesize) & ~(cachesize - 1)
1551 1551 ) - realoffset
1552 1552 with self._datareadfp(df) as df:
1553 1553 df.seek(realoffset)
1554 1554 d = df.read(reallength)
1555 1555
1556 1556 self._cachesegment(realoffset, d)
1557 1557 if offset != realoffset or reallength != length:
1558 1558 startoffset = offset - realoffset
1559 1559 if len(d) - startoffset < length:
1560 1560 raise error.RevlogError(
1561 1561 _(
1562 1562 b'partial read of revlog %s; expected %d bytes from '
1563 1563 b'offset %d, got %d'
1564 1564 )
1565 1565 % (
1566 1566 self._indexfile if self._inline else self._datafile,
1567 1567 length,
1568 1568 offset,
1569 1569 len(d) - startoffset,
1570 1570 )
1571 1571 )
1572 1572
1573 1573 return util.buffer(d, startoffset, length)
1574 1574
1575 1575 if len(d) < length:
1576 1576 raise error.RevlogError(
1577 1577 _(
1578 1578 b'partial read of revlog %s; expected %d bytes from offset '
1579 1579 b'%d, got %d'
1580 1580 )
1581 1581 % (
1582 1582 self._indexfile if self._inline else self._datafile,
1583 1583 length,
1584 1584 offset,
1585 1585 len(d),
1586 1586 )
1587 1587 )
1588 1588
1589 1589 return d
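# Illustrative sketch (editorial): with the default _chunkcachesize of
# 65536, a request for 100 bytes at offset 70000 rounds realoffset down
# to 65536 and reads through the next 64 KiB boundary past the end, so
# the whole [65536, 131072) window is cached and a buffer view of the
# requested 100 bytes is returned.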
1590 1590
1591 1591 def _getsegment(self, offset, length, df=None):
1592 1592 """Obtain a segment of raw data from the revlog.
1593 1593
1594 1594 Accepts an absolute offset, length of bytes to obtain, and an
1595 1595 optional file handle to the already-opened revlog. If the file
1596 1596 handle is used, its original seek position will not be preserved.
1597 1597
1598 1598 Requests for data may be returned from a cache.
1599 1599
1600 1600 Returns a str or a buffer instance of raw byte data.
1601 1601 """
1602 1602 o, d = self._chunkcache
1603 1603 l = len(d)
1604 1604
1605 1605 # is it in the cache?
1606 1606 cachestart = offset - o
1607 1607 cacheend = cachestart + length
1608 1608 if cachestart >= 0 and cacheend <= l:
1609 1609 if cachestart == 0 and cacheend == l:
1610 1610 return d # avoid a copy
1611 1611 return util.buffer(d, cachestart, cacheend - cachestart)
1612 1612
1613 1613 return self._readsegment(offset, length, df=df)
1614 1614
1615 1615 def _getsegmentforrevs(self, startrev, endrev, df=None):
1616 1616 """Obtain a segment of raw data corresponding to a range of revisions.
1617 1617
1618 1618 Accepts the start and end revisions and an optional already-open
1619 1619 file handle to be used for reading. If the file handle is read, its
1620 1620 seek position will not be preserved.
1621 1621
1622 1622 Requests for data may be satisfied by a cache.
1623 1623
1624 1624 Returns a 2-tuple of (offset, data) for the requested range of
1625 1625 revisions. Offset is the integer offset from the beginning of the
1626 1626 revlog and data is a str or buffer of the raw byte data.
1627 1627
1628 1628 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1629 1629 to determine where each revision's data begins and ends.
1630 1630 """
1631 1631 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1632 1632 # (functions are expensive).
1633 1633 index = self.index
1634 1634 istart = index[startrev]
1635 1635 start = int(istart[0] >> 16)
1636 1636 if startrev == endrev:
1637 1637 end = start + istart[1]
1638 1638 else:
1639 1639 iend = index[endrev]
1640 1640 end = int(iend[0] >> 16) + iend[1]
1641 1641
1642 1642 if self._inline:
1643 1643 start += (startrev + 1) * self.index.entry_size
1644 1644 end += (endrev + 1) * self.index.entry_size
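# In an inline revlog each revision's data sits directly after its
# index entry, so the flat data offsets must be shifted by the combined
# size of the preceding index entries.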
1645 1645 length = end - start
1646 1646
1647 1647 return start, self._getsegment(start, length, df=df)
1648 1648
1649 1649 def _chunk(self, rev, df=None):
1650 1650 """Obtain a single decompressed chunk for a revision.
1651 1651
1652 1652 Accepts an integer revision and an optional already-open file handle
1653 1653 to be used for reading. If used, the seek position of the file will not
1654 1654 be preserved.
1655 1655
1656 1656 Returns a str holding uncompressed data for the requested revision.
1657 1657 """
1658 1658 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1659 1659
1660 1660 def _chunks(self, revs, df=None, targetsize=None):
1661 1661 """Obtain decompressed chunks for the specified revisions.
1662 1662
1663 1663 Accepts an iterable of numeric revisions that are assumed to be in
1664 1664 ascending order. Also accepts an optional already-open file handle
1665 1665 to be used for reading. If used, the seek position of the file will
1666 1666 not be preserved.
1667 1667
1668 1668 This function is similar to calling ``self._chunk()`` multiple times,
1669 1669 but is faster.
1670 1670
1671 1671 Returns a list with decompressed data for each requested revision.
1672 1672 """
1673 1673 if not revs:
1674 1674 return []
1675 1675 start = self.start
1676 1676 length = self.length
1677 1677 inline = self._inline
1678 1678 iosize = self.index.entry_size
1679 1679 buffer = util.buffer
1680 1680
1681 1681 l = []
1682 1682 ladd = l.append
1683 1683
1684 1684 if not self._withsparseread:
1685 1685 slicedchunks = (revs,)
1686 1686 else:
1687 1687 slicedchunks = deltautil.slicechunk(
1688 1688 self, revs, targetsize=targetsize
1689 1689 )
1690 1690
1691 1691 for revschunk in slicedchunks:
1692 1692 firstrev = revschunk[0]
1693 1693 # Skip trailing revisions with empty diff
1694 1694 for lastrev in revschunk[::-1]:
1695 1695 if length(lastrev) != 0:
1696 1696 break
1697 1697
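# ``lastrev`` is now the last revision in this chunk with a non-empty
# delta, so the single read below carries no useless trailing bytes.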
1698 1698 try:
1699 1699 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1700 1700 except OverflowError:
1701 1701 # issue4215 - we can't cache a run of chunks greater than
1702 1702 # 2G on Windows
1703 1703 return [self._chunk(rev, df=df) for rev in revschunk]
1704 1704
1705 1705 decomp = self.decompress
1706 1706 for rev in revschunk:
1707 1707 chunkstart = start(rev)
1708 1708 if inline:
1709 1709 chunkstart += (rev + 1) * iosize
1710 1710 chunklength = length(rev)
1711 1711 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1712 1712
1713 1713 return l
1714 1714
1715 1715 def _chunkclear(self):
1716 1716 """Clear the raw chunk cache."""
1717 1717 self._chunkcache = (0, b'')
1718 1718
1719 1719 def deltaparent(self, rev):
1720 1720 """return deltaparent of the given revision"""
1721 1721 base = self.index[rev][3]
1722 1722 if base == rev:
1723 1723 return nullrev
1724 1724 elif self._generaldelta:
1725 1725 return base
1726 1726 else:
1727 1727 return rev - 1
1728 1728
1729 1729 def issnapshot(self, rev):
1730 1730 """tells whether rev is a snapshot"""
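# A snapshot here is a revision stored either as a full text or as a
# delta against another snapshot rather than against one of its
# parents; intermediate snapshots only exist in sparse-revlogs.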
1731 1731 if not self._sparserevlog:
1732 1732 return self.deltaparent(rev) == nullrev
1733 1733 elif util.safehasattr(self.index, b'issnapshot'):
1734 1734 # directly assign the method to cache the testing and access
1735 1735 self.issnapshot = self.index.issnapshot
1736 1736 return self.issnapshot(rev)
1737 1737 if rev == nullrev:
1738 1738 return True
1739 1739 entry = self.index[rev]
1740 1740 base = entry[3]
1741 1741 if base == rev:
1742 1742 return True
1743 1743 if base == nullrev:
1744 1744 return True
1745 1745 p1 = entry[5]
1746 1746 p2 = entry[6]
1747 1747 if base == p1 or base == p2:
1748 1748 return False
1749 1749 return self.issnapshot(base)
1750 1750
1751 1751 def snapshotdepth(self, rev):
1752 1752 """number of snapshots in the chain before this one"""
1753 1753 if not self.issnapshot(rev):
1754 1754 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1755 1755 return len(self._deltachain(rev)[0]) - 1
1756 1756
1757 1757 def revdiff(self, rev1, rev2):
1758 1758 """return or calculate a delta between two revisions
1759 1759
1760 1760 The delta calculated is in binary form and is intended to be written to
1761 1761 revlog data directly. So this function needs raw revision data.
1762 1762 """
1763 1763 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1764 1764 return bytes(self._chunk(rev2))
1765 1765
1766 1766 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1767 1767
1768 1768 def _processflags(self, text, flags, operation, raw=False):
1769 1769 """deprecated entry point to access flag processors"""
1770 1770 msg = b'_processflag(...) use the specialized variant'
1771 1771 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1772 1772 if raw:
1773 1773 return text, flagutil.processflagsraw(self, text, flags)
1774 1774 elif operation == b'read':
1775 1775 return flagutil.processflagsread(self, text, flags)
1776 1776 else: # write operation
1777 1777 return flagutil.processflagswrite(self, text, flags)
1778 1778
1779 1779 def revision(self, nodeorrev, _df=None, raw=False):
1780 1780 """return an uncompressed revision of a given node or revision
1781 1781 number.
1782 1782
1783 1783 _df - an existing file handle to read from. (internal-only)
1784 1784 raw - an optional argument specifying if the revision data is to be
1785 1785 treated as raw data when applying flag transforms. 'raw' should be set
1786 1786 to True when generating changegroups or in debug commands.
1787 1787 """
1788 1788 if raw:
1789 1789 msg = (
1790 1790 b'revlog.revision(..., raw=True) is deprecated, '
1791 1791 b'use revlog.rawdata(...)'
1792 1792 )
1793 1793 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1794 1794 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1795 1795
1796 1796 def sidedata(self, nodeorrev, _df=None):
1797 1797 """a map of extra data related to the changeset but not part of the hash
1798 1798
1799 1799 This function currently returns a dictionary. However, a more
1800 1800 advanced mapping object will likely be used in the future for more
1801 1801 efficient/lazy code.
1802 1802 """
1803 1803 return self._revisiondata(nodeorrev, _df)[1]
1804 1804
1805 1805 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1806 1806 # deal with <nodeorrev> argument type
1807 1807 if isinstance(nodeorrev, int):
1808 1808 rev = nodeorrev
1809 1809 node = self.node(rev)
1810 1810 else:
1811 1811 node = nodeorrev
1812 1812 rev = None
1813 1813
1814 1814 # fast path the special `nullid` rev
1815 1815 if node == self.nullid:
1816 1816 return b"", {}
1817 1817
1818 1818 # ``rawtext`` is the text as stored inside the revlog. Might be the
1819 1819 # revision or might need to be processed to retrieve the revision.
1820 1820 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1821 1821
1822 1822 if self.hassidedata:
1823 1823 if rev is None:
1824 1824 rev = self.rev(node)
1825 1825 sidedata = self._sidedata(rev)
1826 1826 else:
1827 1827 sidedata = {}
1828 1828
1829 1829 if raw and validated:
1830 1830 # if we don't want to process the raw text and that raw
1831 1831 # text is cached, we can exit early.
1832 1832 return rawtext, sidedata
1833 1833 if rev is None:
1834 1834 rev = self.rev(node)
1835 1835 # the revlog's flags for this revision
1836 1836 # (these usually alter its state or content)
1837 1837 flags = self.flags(rev)
1838 1838
1839 1839 if validated and flags == REVIDX_DEFAULT_FLAGS:
1840 1840 # no extra flags set, no flag processor runs, text = rawtext
1841 1841 return rawtext, sidedata
1842 1842
1843 1843 if raw:
1844 1844 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1845 1845 text = rawtext
1846 1846 else:
1847 1847 r = flagutil.processflagsread(self, rawtext, flags)
1848 1848 text, validatehash = r
1849 1849 if validatehash:
1850 1850 self.checkhash(text, node, rev=rev)
1851 1851 if not validated:
1852 1852 self._revisioncache = (node, rev, rawtext)
1853 1853
1854 1854 return text, sidedata
1855 1855
1856 1856 def _rawtext(self, node, rev, _df=None):
1857 1857 """return the possibly unvalidated rawtext for a revision
1858 1858
1859 1859 returns (rev, rawtext, validated)
1860 1860 """
1861 1861
1862 1862 # revision in the cache (could be useful to apply delta)
1863 1863 cachedrev = None
1864 1864 # An intermediate text to apply deltas to
1865 1865 basetext = None
1866 1866
1867 1867 # Check if we have the entry in cache
1868 1868 # The cache entry looks like (node, rev, rawtext)
1869 1869 if self._revisioncache:
1870 1870 if self._revisioncache[0] == node:
1871 1871 return (rev, self._revisioncache[2], True)
1872 1872 cachedrev = self._revisioncache[1]
1873 1873
1874 1874 if rev is None:
1875 1875 rev = self.rev(node)
1876 1876
1877 1877 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1878 1878 if stopped:
1879 1879 basetext = self._revisioncache[2]
1880 1880
1881 1881 # drop cache to save memory, the caller is expected to
1882 1882 # update self._revisioncache after validating the text
1883 1883 self._revisioncache = None
1884 1884
1885 1885 targetsize = None
1886 1886 rawsize = self.index[rev][2]
1887 1887 if 0 <= rawsize:
1888 1888 targetsize = 4 * rawsize
1889 1889
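# ``chain`` lists the revisions whose deltas are applied, in order, on
# top of ``basetext`` to rebuild the raw text; when the chain walk
# stopped at the cached revision, ``basetext`` already holds that
# intermediate result. ``targetsize`` caps sparse reads to roughly four
# times the final text size.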
1890 1890 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1891 1891 if basetext is None:
1892 1892 basetext = bytes(bins[0])
1893 1893 bins = bins[1:]
1894 1894
1895 1895 rawtext = mdiff.patches(basetext, bins)
1896 1896 del basetext # let us have a chance to free memory early
1897 1897 return (rev, rawtext, False)
1898 1898
1899 1899 def _sidedata(self, rev):
1900 1900 """Return the sidedata for a given revision number."""
1901 1901 index_entry = self.index[rev]
1902 1902 sidedata_offset = index_entry[8]
1903 1903 sidedata_size = index_entry[9]
1904 1904
1905 1905 if self._inline:
1906 1906 sidedata_offset += self.index.entry_size * (1 + rev)
1907 1907 if sidedata_size == 0:
1908 1908 return {}
1909 1909
1910 1910 segment = self._getsegment(sidedata_offset, sidedata_size)
1911 1911 sidedata = sidedatautil.deserialize_sidedata(segment)
1912 1912 return sidedata
1913 1913
1914 1914 def rawdata(self, nodeorrev, _df=None):
1915 1915 """return the uncompressed raw data of a given node or revision number.
1916 1916
1917 1917 _df - an existing file handle to read from. (internal-only)
1918 1918 """
1919 1919 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1920 1920
1921 1921 def hash(self, text, p1, p2):
1922 1922 """Compute a node hash.
1923 1923
1924 1924 Available as a function so that subclasses can replace the hash
1925 1925 as needed.
1926 1926 """
1927 1927 return storageutil.hashrevisionsha1(text, p1, p2)
1928 1928
1929 1929 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1930 1930 """Check node hash integrity.
1931 1931
1932 1932 Available as a function so that subclasses can extend hash mismatch
1933 1933 behaviors as needed.
1934 1934 """
1935 1935 try:
1936 1936 if p1 is None and p2 is None:
1937 1937 p1, p2 = self.parents(node)
1938 1938 if node != self.hash(text, p1, p2):
1939 1939 # Clear the revision cache on hash failure. The revision cache
1940 1940 # only stores the raw revision and clearing the cache does have
1941 1941 # the side-effect that we won't have a cache hit when the raw
1942 1942 # revision data is accessed. But this case should be rare and
1943 1943 # it is extra work to teach the cache about the hash
1944 1944 # verification state.
1945 1945 if self._revisioncache and self._revisioncache[0] == node:
1946 1946 self._revisioncache = None
1947 1947
1948 1948 revornode = rev
1949 1949 if revornode is None:
1950 1950 revornode = templatefilters.short(hex(node))
1951 1951 raise error.RevlogError(
1952 1952 _(b"integrity check failed on %s:%s")
1953 1953 % (self.display_id, pycompat.bytestr(revornode))
1954 1954 )
1955 1955 except error.RevlogError:
1956 1956 if self._censorable and storageutil.iscensoredtext(text):
1957 1957 raise error.CensoredNodeError(self.display_id, node, text)
1958 1958 raise
1959 1959
1960 1960 def _enforceinlinesize(self, tr):
1961 1961 """Check if the revlog is too big for inline and convert if so.
1962 1962
1963 1963 This should be called after revisions are added to the revlog. If the
1964 1964 revlog has grown too large to be an inline revlog, it will convert it
1965 1965 to use multiple index and data files.
1966 1966 """
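# Sketch of the flow implemented below: once the inline data outgrows
# _maxinline, every revision's data is copied into a separate .d file
# and the index is rewritten without the inline flag, all within the
# current transaction so the change can be rolled back.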
1967 1967 tiprev = len(self) - 1
1968 1968 total_size = self.start(tiprev) + self.length(tiprev)
1969 1969 if not self._inline or total_size < _maxinline:
1970 1970 return
1971 1971
1972 1972 troffset = tr.findoffset(self._indexfile)
1973 1973 if troffset is None:
1974 1974 raise error.RevlogError(
1975 1975 _(b"%s not found in the transaction") % self._indexfile
1976 1976 )
1977 1977 trindex = 0
1978 1978 tr.add(self._datafile, 0)
1979 1979
1980 1980 existing_handles = False
1981 1981 if self._writinghandles is not None:
1982 1982 existing_handles = True
1983 1983 fp = self._writinghandles[0]
1984 1984 fp.flush()
1985 1985 fp.close()
1986 1986 # We can't use the cached file handle after close(). So prevent
1987 1987 # its usage.
1988 1988 self._writinghandles = None
1989 1989
1990 1990 new_dfh = self._datafp(b'w+')
1991 1991 new_dfh.truncate(0) # drop any potentially existing data
1992 1992 try:
1993 1993 with self._indexfp(b'r') as read_ifh:
1994 1994 for r in self:
1995 1995 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
1996 1996 if troffset <= self.start(r):
1997 1997 trindex = r
1998 1998 new_dfh.flush()
1999 1999
2000 2000 with self.opener(self._indexfile, mode=b'w', atomictemp=True) as fp:
2001 2001 self._format_flags &= ~FLAG_INLINE_DATA
2002 2002 self._inline = False
2003 2003 for i in self:
2004 2004 e = self.index.entry_binary(i)
2005 2005 if i == 0:
2006 2006 header = self._format_flags | self._format_version
2007 2007 header = self.index.pack_header(header)
2008 2008 e = header + e
2009 2009 fp.write(e)
2010 2010 # the temp file replaces the real index when we exit the context
2011 2011 # manager
2012 2012
2013 2013 tr.replace(self._indexfile, trindex * self.index.entry_size)
2014 2014 nodemaputil.setup_persistent_nodemap(tr, self)
2015 2015 self._chunkclear()
2016 2016
2017 2017 if existing_handles:
2018 2018 # switched from inline to conventional; reopen the index
2019 ifh = self._indexfp(b"a+")
2019 ifh = self._indexfp(b"r+")
2020 2020 self._writinghandles = (ifh, new_dfh)
2021 2021 new_dfh = None
2022 2022 finally:
2023 2023 if new_dfh is not None:
2024 2024 new_dfh.close()
2025 2025
2026 2026 def _nodeduplicatecallback(self, transaction, node):
2027 2027 """called when trying to add a node already stored."""
2028 2028
2029 2029 @contextlib.contextmanager
2030 2030 def _writing(self, transaction):
2031 2031 if self._writinghandles is not None:
2032 2032 yield
2033 2033 else:
2034 2034 r = len(self)
2035 2035 dsize = 0
2036 2036 if r:
2037 2037 dsize = self.end(r - 1)
2038 2038 dfh = None
2039 2039 if not self._inline:
2040 dfh = self._datafp(b"a+")
2040 try:
2041 dfh = self._datafp(b"r+")
2042 dfh.seek(0, os.SEEK_END)
2043 except IOError as inst:
2044 if inst.errno != errno.ENOENT:
2045 raise
2046 dfh = self._datafp(b"w+")
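# Note: opening in 'r+' with an explicit seek to EOF avoids the
# platform-dependent quirks of append mode described in _writeentry;
# 'w+' is only used to create a file that does not exist yet.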
2041 2047 transaction.add(self._datafile, dsize)
2042 2048 try:
2043 2049 isize = r * self.index.entry_size
2044 ifh = self._indexfp(b"a+")
2050 try:
2051 ifh = self._indexfp(b"r+")
2052 ifh.seek(0, os.SEEK_END)
2053 except IOError as inst:
2054 if inst.errno != errno.ENOENT:
2055 raise
2056 ifh = self._indexfp(b"w+")
2045 2057 if self._inline:
2046 2058 transaction.add(self._indexfile, dsize + isize)
2047 2059 else:
2048 2060 transaction.add(self._indexfile, isize)
2049 2061 try:
2050 2062 self._writinghandles = (ifh, dfh)
2051 2063 try:
2052 2064 yield
2053 2065 finally:
2054 2066 self._writinghandles = None
2055 2067 finally:
2056 2068 ifh.close()
2057 2069 finally:
2058 2070 if dfh is not None:
2059 2071 dfh.close()
2060 2072
2061 2073 def addrevision(
2062 2074 self,
2063 2075 text,
2064 2076 transaction,
2065 2077 link,
2066 2078 p1,
2067 2079 p2,
2068 2080 cachedelta=None,
2069 2081 node=None,
2070 2082 flags=REVIDX_DEFAULT_FLAGS,
2071 2083 deltacomputer=None,
2072 2084 sidedata=None,
2073 2085 ):
2074 2086 """add a revision to the log
2075 2087
2076 2088 text - the revision data to add
2077 2089 transaction - the transaction object used for rollback
2078 2090 link - the linkrev data to add
2079 2091 p1, p2 - the parent nodeids of the revision
2080 2092 cachedelta - an optional precomputed delta
2081 2093 node - nodeid of revision; typically node is not specified, and it is
2082 2094 computed by default as hash(text, p1, p2), however subclasses might
2083 2095 use a different hashing method (and override checkhash() in that case)
2084 2096 flags - the known flags to set on the revision
2085 2097 deltacomputer - an optional deltacomputer instance shared between
2086 2098 multiple calls
2087 2099 """
2088 2100 if link == nullrev:
2089 2101 raise error.RevlogError(
2090 2102 _(b"attempted to add linkrev -1 to %s") % self.display_id
2091 2103 )
2092 2104
2093 2105 if sidedata is None:
2094 2106 sidedata = {}
2095 2107 elif sidedata and not self.hassidedata:
2096 2108 raise error.ProgrammingError(
2097 2109 _(b"trying to add sidedata to a revlog that does not support it")
2098 2110 )
2099 2111
2100 2112 if flags:
2101 2113 node = node or self.hash(text, p1, p2)
2102 2114
2103 2115 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2104 2116
2105 2117 # If the flag processor modifies the revision data, ignore any provided
2106 2118 # cachedelta.
2107 2119 if rawtext != text:
2108 2120 cachedelta = None
2109 2121
2110 2122 if len(rawtext) > _maxentrysize:
2111 2123 raise error.RevlogError(
2112 2124 _(
2113 2125 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2114 2126 )
2115 2127 % (self.display_id, len(rawtext))
2116 2128 )
2117 2129
2118 2130 node = node or self.hash(rawtext, p1, p2)
2119 2131 rev = self.index.get_rev(node)
2120 2132 if rev is not None:
2121 2133 return rev
2122 2134
2123 2135 if validatehash:
2124 2136 self.checkhash(rawtext, node, p1=p1, p2=p2)
2125 2137
2126 2138 return self.addrawrevision(
2127 2139 rawtext,
2128 2140 transaction,
2129 2141 link,
2130 2142 p1,
2131 2143 p2,
2132 2144 node,
2133 2145 flags,
2134 2146 cachedelta=cachedelta,
2135 2147 deltacomputer=deltacomputer,
2136 2148 sidedata=sidedata,
2137 2149 )
2138 2150
2139 2151 def addrawrevision(
2140 2152 self,
2141 2153 rawtext,
2142 2154 transaction,
2143 2155 link,
2144 2156 p1,
2145 2157 p2,
2146 2158 node,
2147 2159 flags,
2148 2160 cachedelta=None,
2149 2161 deltacomputer=None,
2150 2162 sidedata=None,
2151 2163 ):
2152 2164 """add a raw revision with known flags, node and parents
2153 2165 useful when reusing a revision not stored in this revlog (ex: received
2154 2166 over wire, or read from an external bundle).
2155 2167 """
2156 2168 with self._writing(transaction):
2157 2169 return self._addrevision(
2158 2170 node,
2159 2171 rawtext,
2160 2172 transaction,
2161 2173 link,
2162 2174 p1,
2163 2175 p2,
2164 2176 flags,
2165 2177 cachedelta,
2166 2178 deltacomputer=deltacomputer,
2167 2179 sidedata=sidedata,
2168 2180 )
2169 2181
2170 2182 def compress(self, data):
2171 2183 """Generate a possibly-compressed representation of data."""
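# Returns a (header, data) pair: an empty header means the compressor
# embedded its own marker in ``data``; b'u' flags literal, uncompressed
# data; data starting with NUL is stored as-is because the leading byte
# already disambiguates it.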
2172 2184 if not data:
2173 2185 return b'', data
2174 2186
2175 2187 compressed = self._compressor.compress(data)
2176 2188
2177 2189 if compressed:
2178 2190 # The revlog compressor added the header in the returned data.
2179 2191 return b'', compressed
2180 2192
2181 2193 if data[0:1] == b'\0':
2182 2194 return b'', data
2183 2195 return b'u', data
2184 2196
2185 2197 def decompress(self, data):
2186 2198 """Decompress a revlog chunk.
2187 2199
2188 2200 The chunk is expected to begin with a header identifying the
2189 2201 format type so it can be routed to an appropriate decompressor.
2190 2202 """
2191 2203 if not data:
2192 2204 return data
2193 2205
2194 2206 # Revlogs are read much more frequently than they are written and many
2195 2207 # chunks only take microseconds to decompress, so performance is
2196 2208 # important here.
2197 2209 #
2198 2210 # We can make a few assumptions about revlogs:
2199 2211 #
2200 2212 # 1) the majority of chunks will be compressed (as opposed to inline
2201 2213 # raw data).
2202 2214 # 2) decompressing *any* data will likely be at least 10x slower than
2203 2215 # returning raw inline data.
2204 2216 # 3) we want to prioritize common and officially supported compression
2205 2217 # engines
2206 2218 #
2207 2219 # It follows that we want to optimize for "decompress compressed data
2208 2220 # when encoded with common and officially supported compression engines"
2209 2221 # case over "raw data" and "data encoded by less common or non-official
2210 2222 # compression engines." That is why we have the inline lookup first
2211 2223 # followed by the compengines lookup.
2212 2224 #
2213 2225 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2214 2226 # compressed chunks. And this matters for changelog and manifest reads.
2215 2227 t = data[0:1]
2216 2228
2217 2229 if t == b'x':
2218 2230 try:
2219 2231 return _zlibdecompress(data)
2220 2232 except zlib.error as e:
2221 2233 raise error.RevlogError(
2222 2234 _(b'revlog decompress error: %s')
2223 2235 % stringutil.forcebytestr(e)
2224 2236 )
2225 2237 # '\0' is more common than 'u' so it goes first.
2226 2238 elif t == b'\0':
2227 2239 return data
2228 2240 elif t == b'u':
2229 2241 return util.buffer(data, 1)
2230 2242
2231 2243 try:
2232 2244 compressor = self._decompressors[t]
2233 2245 except KeyError:
2234 2246 try:
2235 2247 engine = util.compengines.forrevlogheader(t)
2236 2248 compressor = engine.revlogcompressor(self._compengineopts)
2237 2249 self._decompressors[t] = compressor
2238 2250 except KeyError:
2239 2251 raise error.RevlogError(
2240 2252 _(b'unknown compression type %s') % binascii.hexlify(t)
2241 2253 )
2242 2254
2243 2255 return compressor.decompress(data)
2244 2256
2245 2257 def _addrevision(
2246 2258 self,
2247 2259 node,
2248 2260 rawtext,
2249 2261 transaction,
2250 2262 link,
2251 2263 p1,
2252 2264 p2,
2253 2265 flags,
2254 2266 cachedelta,
2255 2267 alwayscache=False,
2256 2268 deltacomputer=None,
2257 2269 sidedata=None,
2258 2270 ):
2259 2271 """internal function to add revisions to the log
2260 2272
2261 2273 see addrevision for argument descriptions.
2262 2274
2263 2275 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2264 2276
2265 2277 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2266 2278 be used.
2267 2279
2268 2280 invariants:
2269 2281 - rawtext is optional (can be None); if not set, cachedelta must be set.
2270 2282 if both are set, they must correspond to each other.
2271 2283 """
2272 2284 if node == self.nullid:
2273 2285 raise error.RevlogError(
2274 2286 _(b"%s: attempt to add null revision") % self.display_id
2275 2287 )
2276 2288 if (
2277 2289 node == self.nodeconstants.wdirid
2278 2290 or node in self.nodeconstants.wdirfilenodeids
2279 2291 ):
2280 2292 raise error.RevlogError(
2281 2293 _(b"%s: attempt to add wdir revision") % self.display_id
2282 2294 )
2283 2295 if self._writinghandles is None:
2284 2296 msg = b'adding revision outside `revlog._writing` context'
2285 2297 raise error.ProgrammingError(msg)
2286 2298
2287 2299 if self._inline:
2288 2300 fh = self._writinghandles[0]
2289 2301 else:
2290 2302 fh = self._writinghandles[1]
2291 2303
2292 2304 btext = [rawtext]
2293 2305
2294 2306 curr = len(self)
2295 2307 prev = curr - 1
2296 2308
2297 2309 offset = self._get_data_offset(prev)
2298 2310
2299 2311 if self._concurrencychecker:
2300 2312 ifh, dfh = self._writinghandles
2301 2313 if self._inline:
2302 2314 # offset is "as if" it were in the .d file, so we need to add on
2303 2315 # the size of the entry metadata.
2304 2316 self._concurrencychecker(
2305 2317 ifh, self._indexfile, offset + curr * self.index.entry_size
2306 2318 )
2307 2319 else:
2308 2320 # Entries in the .i are a consistent size.
2309 2321 self._concurrencychecker(
2310 2322 ifh, self._indexfile, curr * self.index.entry_size
2311 2323 )
2312 2324 self._concurrencychecker(dfh, self._datafile, offset)
2313 2325
2314 2326 p1r, p2r = self.rev(p1), self.rev(p2)
2315 2327
2316 2328 # full versions are inserted when the needed deltas
2317 2329 # become comparable to the uncompressed text
2318 2330 if rawtext is None:
2319 2331 # need rawtext size, before changed by flag processors, which is
2320 2332 # the non-raw size. use revlog explicitly to avoid filelog's extra
2321 2333 # logic that might remove metadata size.
2322 2334 textlen = mdiff.patchedsize(
2323 2335 revlog.size(self, cachedelta[0]), cachedelta[1]
2324 2336 )
2325 2337 else:
2326 2338 textlen = len(rawtext)
2327 2339
2328 2340 if deltacomputer is None:
2329 2341 deltacomputer = deltautil.deltacomputer(self)
2330 2342
2331 2343 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2332 2344
2333 2345 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2334 2346
2335 2347 if sidedata and self.hassidedata:
2336 2348 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2337 2349 sidedata_offset = offset + deltainfo.deltalen
2338 2350 else:
2339 2351 serialized_sidedata = b""
2340 2352 # Don't store the offset if the sidedata is empty; that way
2341 2353 # we can easily detect empty sidedata, and it will be no different
2342 2354 # from sidedata we add manually.
2343 2355 sidedata_offset = 0
2344 2356
2345 2357 e = (
2346 2358 offset_type(offset, flags),
2347 2359 deltainfo.deltalen,
2348 2360 textlen,
2349 2361 deltainfo.base,
2350 2362 link,
2351 2363 p1r,
2352 2364 p2r,
2353 2365 node,
2354 2366 sidedata_offset,
2355 2367 len(serialized_sidedata),
2356 2368 )
2357 2369
2358 2370 self.index.append(e)
2359 2371 entry = self.index.entry_binary(curr)
2360 2372 if curr == 0:
2361 2373 header = self._format_flags | self._format_version
2362 2374 header = self.index.pack_header(header)
2363 2375 entry = header + entry
2364 2376 self._writeentry(
2365 2377 transaction,
2366 2378 entry,
2367 2379 deltainfo.data,
2368 2380 link,
2369 2381 offset,
2370 2382 serialized_sidedata,
2371 2383 )
2372 2384
2373 2385 rawtext = btext[0]
2374 2386
2375 2387 if alwayscache and rawtext is None:
2376 2388 rawtext = deltacomputer.buildtext(revinfo, fh)
2377 2389
2378 2390 if type(rawtext) == bytes: # only accept immutable objects
2379 2391 self._revisioncache = (node, curr, rawtext)
2380 2392 self._chainbasecache[curr] = deltainfo.chainbase
2381 2393 return curr
2382 2394
2383 2395 def _get_data_offset(self, prev):
2384 2396 """Returns the current offset in the (in-transaction) data file.
2385 2397 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2386 2398 file to store that information: since sidedata can be rewritten to the
2387 2399 end of the data file within a transaction, you can have cases where, for
2388 2400 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2389 2401 to `n - 1`'s sidedata being written after `n`'s data.
2390 2402
2391 2403 TODO cache this in a docket file before getting out of experimental."""
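# For pre-v2 revlogs the data file is strictly append-only, so the next
# write offset is simply the end of the previous revision; the scan
# below is only needed for REVLOGV2 sidedata rewrites.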
2392 2404 if self._format_version != REVLOGV2:
2393 2405 return self.end(prev)
2394 2406
2395 2407 offset = 0
2396 2408 for rev, entry in enumerate(self.index):
2397 2409 sidedata_end = entry[8] + entry[9]
2398 2410 # Sidedata for a previous rev has potentially been written after
2399 2411 # this rev's end, so take the max.
2400 2412 offset = max(self.end(rev), offset, sidedata_end)
2401 2413 return offset
2402 2414
2403 2415 def _writeentry(self, transaction, entry, data, link, offset, sidedata):
2404 2416 # Files opened in a+ mode have inconsistent behavior on various
2405 2417 # platforms. Windows requires that a file positioning call be made
2406 2418 # when the file handle transitions between reads and writes. See
2407 2419 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2408 2420 # platforms, Python or the platform itself can be buggy. Some versions
2409 2421 # of Solaris have been observed to not append at the end of the file
2410 2422 # if the file was seeked to before the end. See issue4943 for more.
2411 2423 #
2412 2424 # We work around this issue by inserting a seek() before writing.
2413 2425 # Note: This is likely not necessary on Python 3. However, because
2414 2426 # the file handle is reused for reads and may be seeked there, we need
2415 2427 # to be careful before changing this.
2416 2428 if self._writinghandles is None:
2417 2429 msg = b'adding revision outside `revlog._writing` context'
2418 2430 raise error.ProgrammingError(msg)
2419 2431 ifh, dfh = self._writinghandles
2420 2432 ifh.seek(0, os.SEEK_END)
2421 2433 if dfh:
2422 2434 dfh.seek(0, os.SEEK_END)
2423 2435
2424 2436 curr = len(self) - 1
2425 2437 if not self._inline:
2426 2438 transaction.add(self._datafile, offset)
2427 2439 transaction.add(self._indexfile, curr * len(entry))
2428 2440 if data[0]:
2429 2441 dfh.write(data[0])
2430 2442 dfh.write(data[1])
2431 2443 if sidedata:
2432 2444 dfh.write(sidedata)
2433 2445 ifh.write(entry)
2434 2446 else:
2435 2447 offset += curr * self.index.entry_size
2436 2448 transaction.add(self._indexfile, offset)
2437 2449 ifh.write(entry)
2438 2450 ifh.write(data[0])
2439 2451 ifh.write(data[1])
2440 2452 if sidedata:
2441 2453 ifh.write(sidedata)
2442 2454 self._enforceinlinesize(transaction)
2443 2455 nodemaputil.setup_persistent_nodemap(transaction, self)
2444 2456
2445 2457 def addgroup(
2446 2458 self,
2447 2459 deltas,
2448 2460 linkmapper,
2449 2461 transaction,
2450 2462 alwayscache=False,
2451 2463 addrevisioncb=None,
2452 2464 duplicaterevisioncb=None,
2453 2465 ):
2454 2466 """
2455 2467 add a delta group
2456 2468
2457 2469 given a set of deltas, add them to the revision log. the
2458 2470 first delta is against its parent, which should be in our
2459 2471 log, the rest are against the previous delta.
2460 2472
2461 2473 If ``addrevisioncb`` is defined, it will be called with arguments of
2462 2474 this revlog and the node that was added.
2463 2475 """
2464 2476
2465 2477 if self._adding_group:
2466 2478 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2467 2479
2468 2480 self._adding_group = True
2469 2481 empty = True
2470 2482 try:
2471 2483 with self._writing(transaction):
2472 2484 deltacomputer = deltautil.deltacomputer(self)
2473 2485 # loop through our set of deltas
2474 2486 for data in deltas:
2475 2487 (
2476 2488 node,
2477 2489 p1,
2478 2490 p2,
2479 2491 linknode,
2480 2492 deltabase,
2481 2493 delta,
2482 2494 flags,
2483 2495 sidedata,
2484 2496 ) = data
2485 2497 link = linkmapper(linknode)
2486 2498 flags = flags or REVIDX_DEFAULT_FLAGS
2487 2499
2488 2500 rev = self.index.get_rev(node)
2489 2501 if rev is not None:
2490 2502 # this can happen if two branches make the same change
2491 2503 self._nodeduplicatecallback(transaction, rev)
2492 2504 if duplicaterevisioncb:
2493 2505 duplicaterevisioncb(self, rev)
2494 2506 empty = False
2495 2507 continue
2496 2508
2497 2509 for p in (p1, p2):
2498 2510 if not self.index.has_node(p):
2499 2511 raise error.LookupError(
2500 2512 p, self.radix, _(b'unknown parent')
2501 2513 )
2502 2514
2503 2515 if not self.index.has_node(deltabase):
2504 2516 raise error.LookupError(
2505 2517 deltabase, self.display_id, _(b'unknown delta base')
2506 2518 )
2507 2519
2508 2520 baserev = self.rev(deltabase)
2509 2521
2510 2522 if baserev != nullrev and self.iscensored(baserev):
2511 2523 # if base is censored, delta must be full replacement in a
2512 2524 # single patch operation
2513 2525 hlen = struct.calcsize(b">lll")
2514 2526 oldlen = self.rawsize(baserev)
2515 2527 newlen = len(delta) - hlen
2516 2528 if delta[:hlen] != mdiff.replacediffheader(
2517 2529 oldlen, newlen
2518 2530 ):
2519 2531 raise error.CensoredBaseError(
2520 2532 self.display_id, self.node(baserev)
2521 2533 )
2522 2534
2523 2535 if not flags and self._peek_iscensored(baserev, delta):
2524 2536 flags |= REVIDX_ISCENSORED
2525 2537
2526 2538 # We assume consumers of addrevisioncb will want to retrieve
2527 2539 # the added revision, which will require a call to
2528 2540 # revision(). revision() will fast path if there is a cache
2529 2541 # hit. So, we tell _addrevision() to always cache in this case.
2530 2542 # We're only using addgroup() in the context of changegroup
2531 2543 # generation so the revision data can always be handled as raw
2532 2544 # by the flagprocessor.
2533 2545 rev = self._addrevision(
2534 2546 node,
2535 2547 None,
2536 2548 transaction,
2537 2549 link,
2538 2550 p1,
2539 2551 p2,
2540 2552 flags,
2541 2553 (baserev, delta),
2542 2554 alwayscache=alwayscache,
2543 2555 deltacomputer=deltacomputer,
2544 2556 sidedata=sidedata,
2545 2557 )
2546 2558
2547 2559 if addrevisioncb:
2548 2560 addrevisioncb(self, rev)
2549 2561 empty = False
2550 2562 finally:
2551 2563 self._adding_group = False
2552 2564 return not empty
2553 2565
2554 2566 def iscensored(self, rev):
2555 2567 """Check if a file revision is censored."""
2556 2568 if not self._censorable:
2557 2569 return False
2558 2570
2559 2571 return self.flags(rev) & REVIDX_ISCENSORED
2560 2572
2561 2573 def _peek_iscensored(self, baserev, delta):
2562 2574 """Quickly check if a delta produces a censored revision."""
2563 2575 if not self._censorable:
2564 2576 return False
2565 2577
2566 2578 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2567 2579
2568 2580 def getstrippoint(self, minlink):
2569 2581 """find the minimum rev that must be stripped to strip the linkrev
2570 2582
2571 2583 Returns a tuple containing the minimum rev and a set of all revs that
2572 2584 have linkrevs that will be broken by this strip.
2573 2585 """
2574 2586 return storageutil.resolvestripinfo(
2575 2587 minlink,
2576 2588 len(self) - 1,
2577 2589 self.headrevs(),
2578 2590 self.linkrev,
2579 2591 self.parentrevs,
2580 2592 )
2581 2593
2582 2594 def strip(self, minlink, transaction):
2583 2595 """truncate the revlog on the first revision with a linkrev >= minlink
2584 2596
2585 2597 This function is called when we're stripping revision minlink and
2586 2598 its descendants from the repository.
2587 2599
2588 2600 We have to remove all revisions with linkrev >= minlink, because
2589 2601 the equivalent changelog revisions will be renumbered after the
2590 2602 strip.
2591 2603
2592 2604 So we truncate the revlog on the first of these revisions, and
2593 2605 trust that the caller has saved the revisions that shouldn't be
2594 2606 removed and that it'll re-add them after this truncation.
2595 2607 """
2596 2608 if len(self) == 0:
2597 2609 return
2598 2610
2599 2611 rev, _ = self.getstrippoint(minlink)
2600 2612 if rev == len(self):
2601 2613 return
2602 2614
2603 2615 # first truncate the files on disk
2604 2616 end = self.start(rev)
2605 2617 if not self._inline:
2606 2618 transaction.add(self._datafile, end)
2607 2619 end = rev * self.index.entry_size
2608 2620 else:
2609 2621 end += rev * self.index.entry_size
2610 2622
2611 2623 transaction.add(self._indexfile, end)
2612 2624
2613 2625 # then reset internal state in memory to forget those revisions
2614 2626 self._revisioncache = None
2615 2627 self._chaininfocache = util.lrucachedict(500)
2616 2628 self._chunkclear()
2617 2629
2618 2630 del self.index[rev:-1]
2619 2631
2620 2632 def checksize(self):
2621 2633 """Check size of index and data files
2622 2634
2623 2635 return a (dd, di) tuple.
2624 2636 - dd: extra bytes for the "data" file
2625 2637 - di: extra bytes for the "index" file
2626 2638
2627 2639 A healthy revlog will return (0, 0).
2628 2640 """
2629 2641 expected = 0
2630 2642 if len(self):
2631 2643 expected = max(0, self.end(len(self) - 1))
2632 2644
2633 2645 try:
2634 2646 with self._datafp() as f:
2635 2647 f.seek(0, io.SEEK_END)
2636 2648 actual = f.tell()
2637 2649 dd = actual - expected
2638 2650 except IOError as inst:
2639 2651 if inst.errno != errno.ENOENT:
2640 2652 raise
2641 2653 dd = 0
2642 2654
2643 2655 try:
2644 2656 f = self.opener(self._indexfile)
2645 2657 f.seek(0, io.SEEK_END)
2646 2658 actual = f.tell()
2647 2659 f.close()
2648 2660 s = self.index.entry_size
2649 2661 i = max(0, actual // s)
2650 2662 di = actual - (i * s)
2651 2663 if self._inline:
2652 2664 databytes = 0
2653 2665 for r in self:
2654 2666 databytes += max(0, self.length(r))
2655 2667 dd = 0
2656 2668 di = actual - len(self) * s - databytes
2657 2669 except IOError as inst:
2658 2670 if inst.errno != errno.ENOENT:
2659 2671 raise
2660 2672 di = 0
2661 2673
2662 2674 return (dd, di)
2663 2675
2664 2676 def files(self):
2665 2677 res = [self._indexfile]
2666 2678 if not self._inline:
2667 2679 res.append(self._datafile)
2668 2680 return res
2669 2681
2670 2682 def emitrevisions(
2671 2683 self,
2672 2684 nodes,
2673 2685 nodesorder=None,
2674 2686 revisiondata=False,
2675 2687 assumehaveparentrevisions=False,
2676 2688 deltamode=repository.CG_DELTAMODE_STD,
2677 2689 sidedata_helpers=None,
2678 2690 ):
2679 2691 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2680 2692 raise error.ProgrammingError(
2681 2693 b'unhandled value for nodesorder: %s' % nodesorder
2682 2694 )
2683 2695
2684 2696 if nodesorder is None and not self._generaldelta:
2685 2697 nodesorder = b'storage'
2686 2698
2687 2699 if (
2688 2700 not self._storedeltachains
2689 2701 and deltamode != repository.CG_DELTAMODE_PREV
2690 2702 ):
2691 2703 deltamode = repository.CG_DELTAMODE_FULL
2692 2704
2693 2705 return storageutil.emitrevisions(
2694 2706 self,
2695 2707 nodes,
2696 2708 nodesorder,
2697 2709 revlogrevisiondelta,
2698 2710 deltaparentfn=self.deltaparent,
2699 2711 candeltafn=self.candelta,
2700 2712 rawsizefn=self.rawsize,
2701 2713 revdifffn=self.revdiff,
2702 2714 flagsfn=self.flags,
2703 2715 deltamode=deltamode,
2704 2716 revisiondata=revisiondata,
2705 2717 assumehaveparentrevisions=assumehaveparentrevisions,
2706 2718 sidedata_helpers=sidedata_helpers,
2707 2719 )
2708 2720
2709 2721 DELTAREUSEALWAYS = b'always'
2710 2722 DELTAREUSESAMEREVS = b'samerevs'
2711 2723 DELTAREUSENEVER = b'never'
2712 2724
2713 2725 DELTAREUSEFULLADD = b'fulladd'
2714 2726
2715 2727 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2716 2728
2717 2729 def clone(
2718 2730 self,
2719 2731 tr,
2720 2732 destrevlog,
2721 2733 addrevisioncb=None,
2722 2734 deltareuse=DELTAREUSESAMEREVS,
2723 2735 forcedeltabothparents=None,
2724 2736 sidedata_helpers=None,
2725 2737 ):
2726 2738 """Copy this revlog to another, possibly with format changes.
2727 2739
2728 2740 The destination revlog will contain the same revisions and nodes.
2729 2741 However, it may not be bit-for-bit identical due to e.g. delta encoding
2730 2742 differences.
2731 2743
2732 2744 The ``deltareuse`` argument controls how deltas from the existing revlog
2733 2745 are preserved in the destination revlog. The argument can have the
2734 2746 following values:
2735 2747
2736 2748 DELTAREUSEALWAYS
2737 2749 Deltas will always be reused (if possible), even if the destination
2738 2750 revlog would not select the same revisions for the delta. This is the
2739 2751 fastest mode of operation.
2740 2752 DELTAREUSESAMEREVS
2741 2753 Deltas will be reused if the destination revlog would pick the same
2742 2754 revisions for the delta. This mode strikes a balance between speed
2743 2755 and optimization.
2744 2756 DELTAREUSENEVER
2745 2757 Deltas will never be reused. This is the slowest mode of execution.
2746 2758 This mode can be used to recompute deltas (e.g. if the diff/delta
2747 2759 algorithm changes).
2748 2760 DELTAREUSEFULLADD
2749 2761 Revisions will be re-added as if they were new content. This is
2750 2762 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2751 2763 e.g. large file detection and handling.
2752 2764
2753 2765 Delta computation can be slow, so the choice of delta reuse policy can
2754 2766 significantly affect run time.
2755 2767
2756 2768 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2757 2769 two extremes. Deltas will be reused if they are appropriate. But if the
2758 2770 delta could choose a better revision, it will do so. This means if you
2759 2771 are converting a non-generaldelta revlog to a generaldelta revlog,
2760 2772 deltas will be recomputed if the delta's parent isn't a parent of the
2761 2773 revision.
2762 2774
2763 2775 In addition to the delta policy, the ``forcedeltabothparents``
2764 2776 argument controls whether to force computing deltas against both parents
2765 2777 for merges. If None, the destination revlog's existing setting is kept.
2766 2778
2767 2779 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2768 2780 `sidedata_helpers`.
2769 2781 """
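# Typical use (illustrative, not from the source): a repository upgrade
# clones each revlog with deltareuse=DELTAREUSESAMEREVS so cheap deltas
# are kept while format-dependent ones are recomputed.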
2770 2782 if deltareuse not in self.DELTAREUSEALL:
2771 2783 raise ValueError(
2772 2784 _(b'value for deltareuse invalid: %s') % deltareuse
2773 2785 )
2774 2786
2775 2787 if len(destrevlog):
2776 2788 raise ValueError(_(b'destination revlog is not empty'))
2777 2789
2778 2790 if getattr(self, 'filteredrevs', None):
2779 2791 raise ValueError(_(b'source revlog has filtered revisions'))
2780 2792 if getattr(destrevlog, 'filteredrevs', None):
2781 2793 raise ValueError(_(b'destination revlog has filtered revisions'))
2782 2794
2783 2795 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2784 2796 # if possible.
2785 2797 oldlazydelta = destrevlog._lazydelta
2786 2798 oldlazydeltabase = destrevlog._lazydeltabase
2787 2799 oldamd = destrevlog._deltabothparents
2788 2800
2789 2801 try:
2790 2802 if deltareuse == self.DELTAREUSEALWAYS:
2791 2803 destrevlog._lazydeltabase = True
2792 2804 destrevlog._lazydelta = True
2793 2805 elif deltareuse == self.DELTAREUSESAMEREVS:
2794 2806 destrevlog._lazydeltabase = False
2795 2807 destrevlog._lazydelta = True
2796 2808 elif deltareuse == self.DELTAREUSENEVER:
2797 2809 destrevlog._lazydeltabase = False
2798 2810 destrevlog._lazydelta = False
2799 2811
2800 2812 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2801 2813
2802 2814 self._clone(
2803 2815 tr,
2804 2816 destrevlog,
2805 2817 addrevisioncb,
2806 2818 deltareuse,
2807 2819 forcedeltabothparents,
2808 2820 sidedata_helpers,
2809 2821 )
2810 2822
2811 2823 finally:
2812 2824 destrevlog._lazydelta = oldlazydelta
2813 2825 destrevlog._lazydeltabase = oldlazydeltabase
2814 2826 destrevlog._deltabothparents = oldamd
2815 2827
2816 2828 def _clone(
2817 2829 self,
2818 2830 tr,
2819 2831 destrevlog,
2820 2832 addrevisioncb,
2821 2833 deltareuse,
2822 2834 forcedeltabothparents,
2823 2835 sidedata_helpers,
2824 2836 ):
2825 2837 """perform the core duty of `revlog.clone` after parameter processing"""
2826 2838 deltacomputer = deltautil.deltacomputer(destrevlog)
2827 2839 index = self.index
2828 2840 for rev in self:
2829 2841 entry = index[rev]
2830 2842
2831 2843 # Some classes override linkrev to take filtered revs into
2832 2844 # account. Use raw entry from index.
2833 2845 flags = entry[0] & 0xFFFF
2834 2846 linkrev = entry[4]
2835 2847 p1 = index[entry[5]][7]
2836 2848 p2 = index[entry[6]][7]
2837 2849 node = entry[7]
2838 2850
2839 2851 # (Possibly) reuse the delta from the revlog if allowed and
2840 2852 # the revlog chunk is a delta.
2841 2853 cachedelta = None
2842 2854 rawtext = None
2843 2855 if deltareuse == self.DELTAREUSEFULLADD:
2844 2856 text, sidedata = self._revisiondata(rev)
2845 2857
2846 2858 if sidedata_helpers is not None:
2847 2859 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
2848 2860 self, sidedata_helpers, sidedata, rev
2849 2861 )
2850 2862 flags = flags | new_flags[0] & ~new_flags[1]
2851 2863
2852 2864 destrevlog.addrevision(
2853 2865 text,
2854 2866 tr,
2855 2867 linkrev,
2856 2868 p1,
2857 2869 p2,
2858 2870 cachedelta=cachedelta,
2859 2871 node=node,
2860 2872 flags=flags,
2861 2873 deltacomputer=deltacomputer,
2862 2874 sidedata=sidedata,
2863 2875 )
2864 2876 else:
2865 2877 if destrevlog._lazydelta:
2866 2878 dp = self.deltaparent(rev)
2867 2879 if dp != nullrev:
2868 2880 cachedelta = (dp, bytes(self._chunk(rev)))
2869 2881
2870 2882 sidedata = None
2871 2883 if not cachedelta:
2872 2884 rawtext, sidedata = self._revisiondata(rev)
2873 2885 if sidedata is None:
2874 2886 sidedata = self.sidedata(rev)
2875 2887
2876 2888 if sidedata_helpers is not None:
2877 2889 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
2878 2890 self, sidedata_helpers, sidedata, rev
2879 2891 )
2880 2892 flags = flags | new_flags[0] & ~new_flags[1]
2881 2893
2882 2894 with destrevlog._writing(tr):
2883 2895 destrevlog._addrevision(
2884 2896 node,
2885 2897 rawtext,
2886 2898 tr,
2887 2899 linkrev,
2888 2900 p1,
2889 2901 p2,
2890 2902 flags,
2891 2903 cachedelta,
2892 2904 deltacomputer=deltacomputer,
2893 2905 sidedata=sidedata,
2894 2906 )
2895 2907
2896 2908 if addrevisioncb:
2897 2909 addrevisioncb(self, rev, node)
2898 2910
2899 2911 def censorrevision(self, tr, censornode, tombstone=b''):
2900 2912 if self._format_version == REVLOGV0:
2901 2913 raise error.RevlogError(
2902 2914 _(b'cannot censor with version %d revlogs')
2903 2915 % self._format_version
2904 2916 )
2905 2917
2906 2918 censorrev = self.rev(censornode)
2907 2919 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2908 2920
2909 2921 if len(tombstone) > self.rawsize(censorrev):
2910 2922 raise error.Abort(
2911 2923 _(b'censor tombstone must be no longer than censored data')
2912 2924 )
2913 2925
2914 2926 # Rewriting the revlog in place is hard. Our strategy for censoring is
2915 2927 # to create a new revlog, copy all revisions to it, then replace the
2916 2928 # revlogs on transaction close.
2917 2929 #
2918 2930 # This is a bit dangerous. We could easily have a mismatch of state.
2919 2931 newrl = revlog(
2920 2932 self.opener,
2921 2933 target=self.target,
2922 2934 radix=self.radix,
2923 2935 postfix=b'tmpcensored',
2924 2936 censorable=True,
2925 2937 )
2926 2938 newrl._format_version = self._format_version
2927 2939 newrl._format_flags = self._format_flags
2928 2940 newrl._generaldelta = self._generaldelta
2929 2941 newrl._parse_index = self._parse_index
2930 2942
2931 2943 for rev in self.revs():
2932 2944 node = self.node(rev)
2933 2945 p1, p2 = self.parents(node)
2934 2946
2935 2947 if rev == censorrev:
2936 2948 newrl.addrawrevision(
2937 2949 tombstone,
2938 2950 tr,
2939 2951 self.linkrev(censorrev),
2940 2952 p1,
2941 2953 p2,
2942 2954 censornode,
2943 2955 REVIDX_ISCENSORED,
2944 2956 )
2945 2957
2946 2958 if newrl.deltaparent(rev) != nullrev:
2947 2959 raise error.Abort(
2948 2960 _(
2949 2961 b'censored revision stored as delta; '
2950 2962 b'cannot censor'
2951 2963 ),
2952 2964 hint=_(
2953 2965 b'censoring of revlogs is not '
2954 2966 b'fully implemented; please report '
2955 2967 b'this bug'
2956 2968 ),
2957 2969 )
2958 2970 continue
2959 2971
2960 2972 if self.iscensored(rev):
2961 2973 if self.deltaparent(rev) != nullrev:
2962 2974 raise error.Abort(
2963 2975 _(
2964 2976 b'cannot censor due to censored '
2965 2977 b'revision having delta stored'
2966 2978 )
2967 2979 )
2968 2980 rawtext = self._chunk(rev)
2969 2981 else:
2970 2982 rawtext = self.rawdata(rev)
2971 2983
2972 2984 newrl.addrawrevision(
2973 2985 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
2974 2986 )
2975 2987
2976 2988 tr.addbackup(self._indexfile, location=b'store')
2977 2989 if not self._inline:
2978 2990 tr.addbackup(self._datafile, location=b'store')
2979 2991
2980 2992 self.opener.rename(newrl._indexfile, self._indexfile)
2981 2993 if not self._inline:
2982 2994 self.opener.rename(newrl._datafile, self._datafile)
2983 2995
2984 2996 self.clearcaches()
2985 2997 self._loadindex()
2986 2998
2987 2999 def verifyintegrity(self, state):
2988 3000 """Verifies the integrity of the revlog.
2989 3001
2990 3002 Yields ``revlogproblem`` instances describing problems that are
2991 3003 found.
2992 3004 """
2993 3005 dd, di = self.checksize()
2994 3006 if dd:
2995 3007 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
2996 3008 if di:
2997 3009 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
2998 3010
2999 3011 version = self._format_version
3000 3012
3001 3013 # The verifier tells us what version revlog we should be.
3002 3014 if version != state[b'expectedversion']:
3003 3015 yield revlogproblem(
3004 3016 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3005 3017 % (self.display_id, version, state[b'expectedversion'])
3006 3018 )
3007 3019
3008 3020 state[b'skipread'] = set()
3009 3021 state[b'safe_renamed'] = set()
3010 3022
3011 3023 for rev in self:
3012 3024 node = self.node(rev)
3013 3025
3014 3026 # Verify contents. 4 cases to care about:
3015 3027 #
3016 3028 # common: the most common case
3017 3029 # rename: with a rename
3018 3030 # meta: file content starts with b'\1\n', the metadata
3019 3031 # header defined in filelog.py, but without a rename
3020 3032 # ext: content stored externally
3021 3033 #
3022 3034 # More formally, their differences are shown below:
3023 3035 #
3024 3036 # | common | rename | meta | ext
3025 3037 # -------------------------------------------------------
3026 3038 # flags() | 0 | 0 | 0 | not 0
3027 3039 # renamed() | False | True | False | ?
3028 3040 # rawtext[0:2]=='\1\n'| False | True | True | ?
3029 3041 #
3030 3042 # "rawtext" means the raw text stored in revlog data, which
3031 3043 # could be retrieved by "rawdata(rev)". "text"
3032 3044 # mentioned below is "revision(rev)".
3033 3045 #
3034 3046 # There are 3 different lengths stored physically:
3035 3047 # 1. L1: rawsize, stored in revlog index
3036 3048 # 2. L2: len(rawtext), stored in revlog data
3037 3049 # 3. L3: len(text), stored in revlog data if flags==0, or
3038 3050 # possibly somewhere else if flags!=0
3039 3051 #
3040 3052 # L1 should be equal to L2. L3 could be different from them.
3041 3053 # "text" may or may not affect commit hash depending on flag
3042 3054 # processors (see flagutil.addflagprocessor).
3043 3055 #
3044 3056 # | common | rename | meta | ext
3045 3057 # -------------------------------------------------
3046 3058 # rawsize() | L1 | L1 | L1 | L1
3047 3059 # size() | L1 | L2-LM | L1(*) | L1 (?)
3048 3060 # len(rawtext) | L2 | L2 | L2 | L2
3049 3061 # len(text) | L2 | L2 | L2 | L3
3050 3062 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3051 3063 #
3052 3064 # LM: length of metadata, depending on rawtext
3053 3065 # (*): not ideal, see comment in filelog.size
3054 3066 # (?): could be "- len(meta)" if the resolved content has
3055 3067 # rename metadata
3056 3068 #
3057 3069 # Checks needed to be done:
3058 3070 # 1. length check: L1 == L2, in all cases.
3059 3071 # 2. hash check: depending on flag processor, we may need to
3060 3072 # use either "text" (external), or "rawtext" (in revlog).
3061 3073
3062 3074 try:
3063 3075 skipflags = state.get(b'skipflags', 0)
3064 3076 if skipflags:
3065 3077 skipflags &= self.flags(rev)
3066 3078
3067 3079 _verify_revision(self, skipflags, state, node)
3068 3080
3069 3081 l1 = self.rawsize(rev)
3070 3082 l2 = len(self.rawdata(node))
3071 3083
3072 3084 if l1 != l2:
3073 3085 yield revlogproblem(
3074 3086 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3075 3087 node=node,
3076 3088 )
3077 3089
3078 3090 except error.CensoredNodeError:
3079 3091 if state[b'erroroncensored']:
3080 3092 yield revlogproblem(
3081 3093 error=_(b'censored file data'), node=node
3082 3094 )
3083 3095 state[b'skipread'].add(node)
3084 3096 except Exception as e:
3085 3097 yield revlogproblem(
3086 3098 error=_(b'unpacking %s: %s')
3087 3099 % (short(node), stringutil.forcebytestr(e)),
3088 3100 node=node,
3089 3101 )
3090 3102 state[b'skipread'].add(node)
3091 3103
3092 3104 def storageinfo(
3093 3105 self,
3094 3106 exclusivefiles=False,
3095 3107 sharedfiles=False,
3096 3108 revisionscount=False,
3097 3109 trackedsize=False,
3098 3110 storedsize=False,
3099 3111 ):
3100 3112 d = {}
3101 3113
3102 3114 if exclusivefiles:
3103 3115 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3104 3116 if not self._inline:
3105 3117 d[b'exclusivefiles'].append((self.opener, self._datafile))
3106 3118
3107 3119 if sharedfiles:
3108 3120 d[b'sharedfiles'] = []
3109 3121
3110 3122 if revisionscount:
3111 3123 d[b'revisionscount'] = len(self)
3112 3124
3113 3125 if trackedsize:
3114 3126 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3115 3127
3116 3128 if storedsize:
3117 3129 d[b'storedsize'] = sum(
3118 3130 self.opener.stat(path).st_size for path in self.files()
3119 3131 )
3120 3132
3121 3133 return d
3122 3134
3123 3135 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3124 3136 if not self.hassidedata:
3125 3137 return
3126 3138 # inline revlogs are not yet supported because they suffer from an
3127 3139 # issue when rewriting them (since it's not an append-only operation).
3128 3140 # See issue6485.
3129 3141 assert not self._inline
3130 3142 if not helpers[1] and not helpers[2]:
3131 3143 # Nothing to generate or remove
3132 3144 return
3133 3145
3134 3146 # changelog implements a "delayed" writing mechanism that assumes that
3135 3147 # all index data is written in append mode and is therefore incompatible
3136 3148 # with the seeked write done in this method. The use of such "delayed"
3137 3149 # writing will soon be removed for revlog versions that support side
3138 3150 # data, so for now, we only keep this simple assert to highlight the
3139 3151 # situation.
3140 3152 delayed = getattr(self, '_delayed', False)
3141 3153 diverted = getattr(self, '_divert', False)
3142 3154 if delayed and not diverted:
3143 3155 msg = "cannot rewrite_sidedata of a delayed revlog"
3144 3156 raise error.ProgrammingError(msg)
3145 3157
3146 3158 new_entries = []
3147 3159 # append the new sidedata
3148 3160 with self._datafp(b'a+') as dfh:
3149 3161 # Maybe this bug still exists, see revlog._writeentry
3150 3162 dfh.seek(0, os.SEEK_END)
3151 3163 current_offset = dfh.tell()
3152 3164 for rev in range(startrev, endrev + 1):
3153 3165 entry = self.index[rev]
3154 3166 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3155 3167 store=self,
3156 3168 sidedata_helpers=helpers,
3157 3169 sidedata={},
3158 3170 rev=rev,
3159 3171 )
3160 3172
3161 3173 serialized_sidedata = sidedatautil.serialize_sidedata(
3162 3174 new_sidedata
3163 3175 )
3164 3176 if entry[8] != 0 or entry[9] != 0:
3165 3177 # rewriting entries that already have sidedata is not
3166 3178 # supported yet, because it introduces garbage data in the
3167 3179 # revlog.
3168 3180 msg = b"Rewriting existing sidedata is not supported yet"
3169 3181 raise error.Abort(msg)
3170 3182
3171 3183 # Apply (potential) flags to add and to remove after running
3172 3184 # the sidedata helpers
3173 3185 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3174 3186 entry = (new_offset_flags,) + entry[1:8]
3175 3187 entry += (current_offset, len(serialized_sidedata))
3176 3188
3189 # the sidedata computation might have moved the file cursors around
3190 dfh.seek(current_offset, os.SEEK_SET)
3177 3191 dfh.write(serialized_sidedata)
3178 3192 new_entries.append(entry)
3179 3193 current_offset += len(serialized_sidedata)
3180 3194
3181 3195 # rewrite the new index entries
3182 3196 with self._indexfp(b'r+') as ifh:
3183 3197 ifh.seek(startrev * self.index.entry_size)
3184 3198 for i, e in enumerate(new_entries):
3185 3199 rev = startrev + i
3186 3200 self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
3187 3201 packed = self.index.entry_binary(rev)
3188 3202 if rev == 0:
3189 3203 header = self._format_flags | self._format_version
3190 3204 header = self.index.pack_header(header)
3191 3205 packed = header + packed
3192 3206 ifh.write(packed)
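
A note on the flag arithmetic in the hunk above: in "new_offset_flags = entry[0] | flags[0] & ~flags[1]", Python's '&' binds tighter than '|', so the removal mask flags[1] only filters the freshly added flags[0] and never clears flags already set on the entry. A minimal worked sketch with made-up values (not real revlog flags):

    existing = 0b0001                    # entry[0]: flags already on the entry
    to_add, to_remove = 0b0110, 0b0100   # flags[0] / flags[1] from the helpers

    new_flags = existing | to_add & ~to_remove
    # parsed as: existing | (to_add & ~to_remove)
    assert new_flags == 0b0011           # 0b0100 is masked out of the additions only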
@@ -1,814 +1,814 b''
1 1 # store.py - repository store handling for Mercurial
2 2 #
3 3 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import getattr
18 18 from .node import hex
19 19 from . import (
20 20 changelog,
21 21 error,
22 22 manifest,
23 23 policy,
24 24 pycompat,
25 25 util,
26 26 vfs as vfsmod,
27 27 )
28 28 from .utils import hashutil
29 29
30 30 parsers = policy.importmod('parsers')
31 31 # how many bytes should be read from fncache in one read
32 32 # This is done to prevent loading large fncache files into memory
33 33 fncache_chunksize = 10 ** 6
34 34
35 35
36 36 def _matchtrackedpath(path, matcher):
37 37 """parses a fncache entry and returns whether the entry is tracking a path
38 38 matched by matcher or not.
39 39
40 40 If matcher is None, returns True"""
41 41
42 42 if matcher is None:
43 43 return True
44 44 path = decodedir(path)
45 45 if path.startswith(b'data/'):
46 46 return matcher(path[len(b'data/') : -len(b'.i')])
47 47 elif path.startswith(b'meta/'):
48 48 return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
49 49
50 50 raise error.ProgrammingError(b"cannot decode path %s" % path)
51 51
52 52
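A quick illustration of the path stripping done by _matchtrackedpath, using a permissive matcher stub (hypothetical, for demonstration only): 'data/' entries strip the prefix and the '.i' suffix before calling the matcher, while 'meta/' entries strip down to the directory and use visitdir.

    class MatchAll(object):              # stand-in for a real matcher object
        def __call__(self, path):
            return True
        def visitdir(self, directory):
            return True

    m = MatchAll()
    assert _matchtrackedpath(b'data/foo.i', m)             # calls m(b'foo')
    assert _matchtrackedpath(b'meta/foo/00manifest.i', m)  # calls m.visitdir(b'foo')
    assert _matchtrackedpath(b'data/foo.i', None)          # no matcher: always True
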
53 53 # This avoids a collision between a file named foo and a dir named
54 54 # foo.i or foo.d
55 55 def _encodedir(path):
56 56 """
57 57 >>> _encodedir(b'data/foo.i')
58 58 'data/foo.i'
59 59 >>> _encodedir(b'data/foo.i/bla.i')
60 60 'data/foo.i.hg/bla.i'
61 61 >>> _encodedir(b'data/foo.i.hg/bla.i')
62 62 'data/foo.i.hg.hg/bla.i'
63 63 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
64 64 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
65 65 """
66 66 return (
67 67 path.replace(b".hg/", b".hg.hg/")
68 68 .replace(b".i/", b".i.hg/")
69 69 .replace(b".d/", b".d.hg/")
70 70 )
71 71
72 72
73 73 encodedir = getattr(parsers, 'encodedir', _encodedir)
74 74
75 75
76 76 def decodedir(path):
77 77 """
78 78 >>> decodedir(b'data/foo.i')
79 79 'data/foo.i'
80 80 >>> decodedir(b'data/foo.i.hg/bla.i')
81 81 'data/foo.i/bla.i'
82 82 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
83 83 'data/foo.i.hg/bla.i'
84 84 """
85 85 if b".hg/" not in path:
86 86 return path
87 87 return (
88 88 path.replace(b".d.hg/", b".d/")
89 89 .replace(b".i.hg/", b".i/")
90 90 .replace(b".hg.hg/", b".hg/")
91 91 )
92 92
93 93
94 94 def _reserved():
95 95 """characters that are problematic for filesystems
96 96
97 97 * ascii escapes (0..31)
98 98 * ascii hi (126..255)
99 99 * windows specials
100 100
101 101 these characters will be escaped by the encode functions
102 102 """
103 103 winreserved = [ord(x) for x in u'\\:*?"<>|']
104 104 for x in range(32):
105 105 yield x
106 106 for x in range(126, 256):
107 107 yield x
108 108 for x in winreserved:
109 109 yield x
110 110
111 111
112 112 def _buildencodefun():
113 113 """
114 114 >>> enc, dec = _buildencodefun()
115 115
116 116 >>> enc(b'nothing/special.txt')
117 117 'nothing/special.txt'
118 118 >>> dec(b'nothing/special.txt')
119 119 'nothing/special.txt'
120 120
121 121 >>> enc(b'HELLO')
122 122 '_h_e_l_l_o'
123 123 >>> dec(b'_h_e_l_l_o')
124 124 'HELLO'
125 125
126 126 >>> enc(b'hello:world?')
127 127 'hello~3aworld~3f'
128 128 >>> dec(b'hello~3aworld~3f')
129 129 'hello:world?'
130 130
131 131 >>> enc(b'the\\x07quick\\xADshot')
132 132 'the~07quick~adshot'
133 133 >>> dec(b'the~07quick~adshot')
134 134 'the\\x07quick\\xadshot'
135 135 """
136 136 e = b'_'
137 137 xchr = pycompat.bytechr
138 138 asciistr = list(map(xchr, range(127)))
139 139 capitals = list(range(ord(b"A"), ord(b"Z") + 1))
140 140
141 141 cmap = {x: x for x in asciistr}
142 142 for x in _reserved():
143 143 cmap[xchr(x)] = b"~%02x" % x
144 144 for x in capitals + [ord(e)]:
145 145 cmap[xchr(x)] = e + xchr(x).lower()
146 146
147 147 dmap = {}
148 148 for k, v in pycompat.iteritems(cmap):
149 149 dmap[v] = k
150 150
151 151 def decode(s):
152 152 i = 0
153 153 while i < len(s):
154 154 for l in pycompat.xrange(1, 4):
155 155 try:
156 156 yield dmap[s[i : i + l]]
157 157 i += l
158 158 break
159 159 except KeyError:
160 160 pass
161 161 else:
162 162 raise KeyError
163 163
164 164 return (
165 165 lambda s: b''.join(
166 166 [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
167 167 ),
168 168 lambda s: b''.join(list(decode(s))),
169 169 )
170 170
171 171
172 172 _encodefname, _decodefname = _buildencodefun()
173 173
174 174
175 175 def encodefilename(s):
176 176 """
177 177 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
178 178 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
179 179 """
180 180 return _encodefname(encodedir(s))
181 181
182 182
183 183 def decodefilename(s):
184 184 """
185 185 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
186 186 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
187 187 """
188 188 return decodedir(_decodefname(s))
189 189
190 190
191 191 def _buildlowerencodefun():
192 192 """
193 193 >>> f = _buildlowerencodefun()
194 194 >>> f(b'nothing/special.txt')
195 195 'nothing/special.txt'
196 196 >>> f(b'HELLO')
197 197 'hello'
198 198 >>> f(b'hello:world?')
199 199 'hello~3aworld~3f'
200 200 >>> f(b'the\\x07quick\\xADshot')
201 201 'the~07quick~adshot'
202 202 """
203 203 xchr = pycompat.bytechr
204 204 cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)}
205 205 for x in _reserved():
206 206 cmap[xchr(x)] = b"~%02x" % x
207 207 for x in range(ord(b"A"), ord(b"Z") + 1):
208 208 cmap[xchr(x)] = xchr(x).lower()
209 209
210 210 def lowerencode(s):
211 211 return b"".join([cmap[c] for c in pycompat.iterbytestr(s)])
212 212
213 213 return lowerencode
214 214
215 215
216 216 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
217 217
218 218 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
219 219 _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3
220 220 _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9)
221 221
222 222
223 223 def _auxencode(path, dotencode):
224 224 """
225 225 Encodes filenames containing names reserved by Windows or which end in
226 226 period or space. Does not touch other single reserved characters c.
227 227 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
228 228 Additionally encodes space or period at the beginning, if dotencode is
229 229 True. Parameter path is assumed to be all lowercase.
230 230 A segment only needs encoding if a reserved name appears as a
231 231 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
232 232 doesn't need encoding.
233 233
234 234 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
235 235 >>> _auxencode(s.split(b'/'), True)
236 236 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
237 237 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
238 238 >>> _auxencode(s.split(b'/'), False)
239 239 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
240 240 >>> _auxencode([b'foo. '], True)
241 241 ['foo.~20']
242 242 >>> _auxencode([b' .foo'], True)
243 243 ['~20.foo']
244 244 """
245 245 for i, n in enumerate(path):
246 246 if not n:
247 247 continue
248 248 if dotencode and n[0] in b'. ':
249 249 n = b"~%02x" % ord(n[0:1]) + n[1:]
250 250 path[i] = n
251 251 else:
252 252 l = n.find(b'.')
253 253 if l == -1:
254 254 l = len(n)
255 255 if (l == 3 and n[:3] in _winres3) or (
256 256 l == 4
257 257 and n[3:4] <= b'9'
258 258 and n[3:4] >= b'1'
259 259 and n[:3] in _winres4
260 260 ):
261 261 # encode third letter ('aux' -> 'au~78')
262 262 ec = b"~%02x" % ord(n[2:3])
263 263 n = n[0:2] + ec + n[3:]
264 264 path[i] = n
265 265 if n[-1] in b'. ':
266 266 # encode last period or space ('foo...' -> 'foo..~2e')
267 267 path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
268 268 return path
269 269
270 270
271 271 _maxstorepathlen = 120
272 272 _dirprefixlen = 8
273 273 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
274 274
275 275
276 276 def _hashencode(path, dotencode):
277 277 digest = hex(hashutil.sha1(path).digest())
278 278 le = lowerencode(path[5:]).split(b'/') # skips prefix 'data/' or 'meta/'
279 279 parts = _auxencode(le, dotencode)
280 280 basename = parts[-1]
281 281 _root, ext = os.path.splitext(basename)
282 282 sdirs = []
283 283 sdirslen = 0
284 284 for p in parts[:-1]:
285 285 d = p[:_dirprefixlen]
286 286 if d[-1] in b'. ':
287 287 # Windows can't access dirs ending in period or space
288 288 d = d[:-1] + b'_'
289 289 if sdirslen == 0:
290 290 t = len(d)
291 291 else:
292 292 t = sdirslen + 1 + len(d)
293 293 if t > _maxshortdirslen:
294 294 break
295 295 sdirs.append(d)
296 296 sdirslen = t
297 297 dirs = b'/'.join(sdirs)
298 298 if len(dirs) > 0:
299 299 dirs += b'/'
300 300 res = b'dh/' + dirs + digest + ext
301 301 spaceleft = _maxstorepathlen - len(res)
302 302 if spaceleft > 0:
303 303 filler = basename[:spaceleft]
304 304 res = b'dh/' + dirs + filler + digest + ext
305 305 return res
306 306
307 307
308 308 def _hybridencode(path, dotencode):
309 309 """encodes path with a length limit
310 310
311 311 Encodes all paths that begin with 'data/', according to the following.
312 312
313 313 Default encoding (reversible):
314 314
315 315 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
316 316 characters are encoded as '~xx', where xx is the two digit hex code
317 317 of the character (see encodefilename).
318 318 Relevant path components consisting of Windows reserved filenames are
319 319 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
320 320
321 321 Hashed encoding (not reversible):
322 322
323 323 If the default-encoded path is longer than _maxstorepathlen, a
324 324 non-reversible hybrid hashing of the path is done instead.
325 325 This encoding uses up to _dirprefixlen characters of all directory
326 326 levels of the lowerencoded path, but not more levels than can fit into
327 327 _maxshortdirslen.
328 328 Then follows the filler followed by the sha digest of the full path.
329 329 The filler is the beginning of the basename of the lowerencoded path
330 330 (the basename is everything after the last path separator). The filler
331 331 is as long as possible, filling in characters from the basename until
332 332 the encoded path has _maxstorepathlen characters (or all chars of the
333 333 basename have been taken).
334 334 The extension (e.g. '.i' or '.d') is preserved.
335 335
336 336 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
337 337 encoding was used.
338 338 """
339 339 path = encodedir(path)
340 340 ef = _encodefname(path).split(b'/')
341 341 res = b'/'.join(_auxencode(ef, dotencode))
342 342 if len(res) > _maxstorepathlen:
343 343 res = _hashencode(path, dotencode)
344 344 return res
345 345
346 346
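A short usage sketch for the two regimes described in the docstring above (assuming the helpers in this module are in scope; the interesting part is the prefix of the result, not its exact bytes):

    short_path = b'data/src/main.py.i'
    long_path = b'data/' + b'x' * 200 + b'.i'

    print(_hybridencode(short_path, True))   # short: reversible, keeps 'data/'
    print(_hybridencode(long_path, True))    # > _maxstorepathlen: hashed 'dh/...'
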
347 347 def _pathencode(path):
348 348 de = encodedir(path)
349 349 if len(path) > _maxstorepathlen:
350 350 return _hashencode(de, True)
351 351 ef = _encodefname(de).split(b'/')
352 352 res = b'/'.join(_auxencode(ef, True))
353 353 if len(res) > _maxstorepathlen:
354 354 return _hashencode(de, True)
355 355 return res
356 356
357 357
358 358 _pathencode = getattr(parsers, 'pathencode', _pathencode)
359 359
360 360
361 361 def _plainhybridencode(f):
362 362 return _hybridencode(f, False)
363 363
364 364
365 365 def _calcmode(vfs):
366 366 try:
367 367 # files in .hg/ will be created using this mode
368 368 mode = vfs.stat().st_mode
369 369 # avoid some useless chmods
370 370 if (0o777 & ~util.umask) == (0o777 & mode):
371 371 mode = None
372 372 except OSError:
373 373 mode = None
374 374 return mode
375 375
376 376
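A worked instance of the umask test in _calcmode above: with the common umask 0o022, files are created with mode 0o777 & ~0o022 == 0o755 by default, so a store directory already at 0o755 yields mode = None and later chmod calls are skipped.

    umask = 0o022
    dir_mode = 0o755                                # a directory created with defaults
    assert (0o777 & ~umask) == (0o777 & dir_mode)   # no chmod needed
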
377 377 _data = [
378 378 b'bookmarks',
379 379 b'narrowspec',
380 380 b'data',
381 381 b'meta',
382 382 b'00manifest.d',
383 383 b'00manifest.i',
384 384 b'00changelog.d',
385 385 b'00changelog.i',
386 386 b'phaseroots',
387 387 b'obsstore',
388 388 b'requires',
389 389 ]
390 390
391 391 REVLOG_FILES_MAIN_EXT = (b'.i', b'i.tmpcensored')
392 392 REVLOG_FILES_OTHER_EXT = (b'.d', b'.n', b'.nd', b'd.tmpcensored')
393 393 # files that are "volatile" and might change between listing and streaming
394 394 #
395 395 # note: the ".nd" files are nodemap data and won't "change" but they might be
396 396 # deleted.
397 397 REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')
398 398
399 399 # some exceptions to the above matching
400 400 EXCLUDED = re.compile(b'.*undo\.[^/]+\.nd?$')
401 401
402 402
403 403 def is_revlog(f, kind, st):
404 404 if kind != stat.S_IFREG:
405 405 return None
406 406 return revlog_type(f)
407 407
408 408
409 409 def revlog_type(f):
410 410 if f.endswith(REVLOG_FILES_MAIN_EXT):
411 411 return FILEFLAGS_REVLOG_MAIN
412 412 elif f.endswith(REVLOG_FILES_OTHER_EXT) and EXCLUDED.match(f) is None:
413 413 t = FILETYPE_FILELOG_OTHER
414 414 if f.endswith(REVLOG_FILES_VOLATILE_EXT):
415 415 t |= FILEFLAGS_VOLATILE
416 416 return t
417 417
418 418
419 419 # the file is part of changelog data
420 420 FILEFLAGS_CHANGELOG = 1 << 13
421 421 # the file is part of manifest data
422 422 FILEFLAGS_MANIFESTLOG = 1 << 12
423 423 # the file is part of filelog data
424 424 FILEFLAGS_FILELOG = 1 << 11
425 425 # files that are not directly part of a revlog
426 426 FILEFLAGS_OTHER = 1 << 10
427 427
428 428 # the main entry point for a revlog
429 429 FILEFLAGS_REVLOG_MAIN = 1 << 1
430 430 # a secondary file for a revlog
431 431 FILEFLAGS_REVLOG_OTHER = 1 << 0
432 432
433 433 # files that are "volatile" and might change between listing and streaming
434 434 FILEFLAGS_VOLATILE = 1 << 20
435 435
436 436 FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN
437 437 FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER
438 438 FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN
439 439 FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER
440 440 FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN
441 441 FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER
442 442 FILETYPE_OTHER = FILEFLAGS_OTHER
443 443
444 444
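The FILETYPE_* constants above are plain bit unions, so consumers can test each aspect of a classified file independently with masks. A small sketch, assuming the constants are in scope:

    t = FILETYPE_CHANGELOG_MAIN           # FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN
    assert t & FILEFLAGS_CHANGELOG        # belongs to the changelog
    assert t & FILEFLAGS_REVLOG_MAIN      # it is the main '.i' entry point
    assert not (t & FILEFLAGS_VOLATILE)   # stable between listing and streaming
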
445 445 class basicstore(object):
446 446 '''base class for local repository stores'''
447 447
448 448 def __init__(self, path, vfstype):
449 449 vfs = vfstype(path)
450 450 self.path = vfs.base
451 451 self.createmode = _calcmode(vfs)
452 452 vfs.createmode = self.createmode
453 453 self.rawvfs = vfs
454 454 self.vfs = vfsmod.filtervfs(vfs, encodedir)
455 455 self.opener = self.vfs
456 456
457 457 def join(self, f):
458 458 return self.path + b'/' + encodedir(f)
459 459
460 460 def _walk(self, relpath, recurse):
461 461 '''yields (unencoded, encoded, size)'''
462 462 path = self.path
463 463 if relpath:
464 464 path += b'/' + relpath
465 465 striplen = len(self.path) + 1
466 466 l = []
467 467 if self.rawvfs.isdir(path):
468 468 visit = [path]
469 469 readdir = self.rawvfs.readdir
470 470 while visit:
471 471 p = visit.pop()
472 472 for f, kind, st in readdir(p, stat=True):
473 473 fp = p + b'/' + f
474 474 rl_type = is_revlog(f, kind, st)
475 475 if rl_type is not None:
476 476 n = util.pconvert(fp[striplen:])
477 477 l.append((rl_type, decodedir(n), n, st.st_size))
478 478 elif kind == stat.S_IFDIR and recurse:
479 479 visit.append(fp)
480 480 l.sort()
481 481 return l
482 482
483 483 def changelog(self, trypending, concurrencychecker=None):
484 484 return changelog.changelog(
485 485 self.vfs,
486 486 trypending=trypending,
487 487 concurrencychecker=concurrencychecker,
488 488 )
489 489
490 490 def manifestlog(self, repo, storenarrowmatch):
491 491 rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
492 492 return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
493 493
494 494 def datafiles(self, matcher=None):
495 495 files = self._walk(b'data', True) + self._walk(b'meta', True)
496 496 for (t, u, e, s) in files:
497 497 yield (FILEFLAGS_FILELOG | t, u, e, s)
498 498
499 499 def topfiles(self):
500 500 # yield manifest before changelog
501 501 files = reversed(self._walk(b'', False))
502 502 for (t, u, e, s) in files:
503 503 if u.startswith(b'00changelog'):
504 504 yield (FILEFLAGS_CHANGELOG | t, u, e, s)
505 505 elif u.startswith(b'00manifest'):
506 506 yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
507 507 else:
508 508 yield (FILETYPE_OTHER | t, u, e, s)
509 509
510 510 def walk(self, matcher=None):
511 511 """return file related to data storage (ie: revlogs)
512 512
513 513 yields (file_type, unencoded, encoded, size)
514 514
515 515 if a matcher is passed, storage files of only those tracked paths
516 516 are passed with matches the matcher
517 517 """
518 518 # yield data files first
519 519 for x in self.datafiles(matcher):
520 520 yield x
521 521 for x in self.topfiles():
522 522 yield x
523 523
524 524 def copylist(self):
525 525 return _data
526 526
527 527 def write(self, tr):
528 528 pass
529 529
530 530 def invalidatecaches(self):
531 531 pass
532 532
533 533 def markremoved(self, fn):
534 534 pass
535 535
536 536 def __contains__(self, path):
537 537 '''Checks if the store contains path'''
538 538 path = b"/".join((b"data", path))
539 539 # file?
540 540 if self.vfs.exists(path + b".i"):
541 541 return True
542 542 # dir?
543 543 if not path.endswith(b"/"):
544 544 path = path + b"/"
545 545 return self.vfs.exists(path)
546 546
547 547
548 548 class encodedstore(basicstore):
549 549 def __init__(self, path, vfstype):
550 550 vfs = vfstype(path + b'/store')
551 551 self.path = vfs.base
552 552 self.createmode = _calcmode(vfs)
553 553 vfs.createmode = self.createmode
554 554 self.rawvfs = vfs
555 555 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
556 556 self.opener = self.vfs
557 557
558 558 def datafiles(self, matcher=None):
559 559 for t, a, b, size in super(encodedstore, self).datafiles():
560 560 try:
561 561 a = decodefilename(a)
562 562 except KeyError:
563 563 a = None
564 564 if a is not None and not _matchtrackedpath(a, matcher):
565 565 continue
566 566 yield t, a, b, size
567 567
568 568 def join(self, f):
569 569 return self.path + b'/' + encodefilename(f)
570 570
571 571 def copylist(self):
572 572 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]
573 573
574 574
575 575 class fncache(object):
576 576 # the filename used to be partially encoded
577 577 # hence the encodedir/decodedir dance
578 578 def __init__(self, vfs):
579 579 self.vfs = vfs
580 580 self.entries = None
581 581 self._dirty = False
582 582 # set of new additions to fncache
583 583 self.addls = set()
584 584
585 585 def ensureloaded(self, warn=None):
586 586 """read the fncache file if not already read.
587 587
588 588 If the file on disk is corrupted, raise. If warn is provided,
589 589 warn and keep going instead."""
590 590 if self.entries is None:
591 591 self._load(warn)
592 592
593 593 def _load(self, warn=None):
594 594 '''fill the entries from the fncache file'''
595 595 self._dirty = False
596 596 try:
597 597 fp = self.vfs(b'fncache', mode=b'rb')
598 598 except IOError:
599 599 # skip nonexistent file
600 600 self.entries = set()
601 601 return
602 602
603 603 self.entries = set()
604 604 chunk = b''
605 605 for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
606 606 chunk += c
607 607 try:
608 608 p = chunk.rindex(b'\n')
609 609 self.entries.update(decodedir(chunk[: p + 1]).splitlines())
610 610 chunk = chunk[p + 1 :]
611 611 except ValueError:
612 612 # substring '\n' not found, maybe the entry is bigger than the
613 613 # chunksize, so let's keep iterating
614 614 pass
615 615
616 616 if chunk:
617 617 msg = _(b"fncache does not end with a newline")
618 618 if warn:
619 619 warn(msg + b'\n')
620 620 else:
621 621 raise error.Abort(
622 622 msg,
623 623 hint=_(
624 624 b"use 'hg debugrebuildfncache' to "
625 625 b"rebuild the fncache"
626 626 ),
627 627 )
628 628 self._checkentries(fp, warn)
629 629 fp.close()
630 630
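The chunked read in _load only ever splits on complete lines: each chunk is cut at its last newline and the unfinished tail is carried into the next iteration, so a non-empty tail at EOF means the file did not end with a newline. A standalone sketch of the same pattern (generic data, tiny chunk size for demonstration):

    import functools
    import io

    fp = io.BytesIO(b'alpha\nbeta\ngamma\n')
    entries = set()
    chunk = b''
    for c in iter(functools.partial(fp.read, 4), b''):
        chunk += c
        try:
            p = chunk.rindex(b'\n')                 # cut at the last complete line
            entries.update(chunk[: p + 1].splitlines())
            chunk = chunk[p + 1 :]                  # carry the partial tail forward
        except ValueError:
            pass                                    # no newline yet, keep reading
    assert entries == {b'alpha', b'beta', b'gamma'}
    assert chunk == b''                             # leftover would mean a missing newline
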
631 631 def _checkentries(self, fp, warn):
632 632 """make sure there is no empty string in entries"""
633 633 if b'' in self.entries:
634 634 fp.seek(0)
635 635 for n, line in enumerate(util.iterfile(fp)):
636 636 if not line.rstrip(b'\n'):
637 637 t = _(b'invalid entry in fncache, line %d') % (n + 1)
638 638 if warn:
639 639 warn(t + b'\n')
640 640 else:
641 641 raise error.Abort(t)
642 642
643 643 def write(self, tr):
644 644 if self._dirty:
645 645 assert self.entries is not None
646 646 self.entries = self.entries | self.addls
647 647 self.addls = set()
648 648 tr.addbackup(b'fncache')
649 649 fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
650 650 if self.entries:
651 651 fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
652 652 fp.close()
653 653 self._dirty = False
654 654 if self.addls:
655 655 # if we have just new entries, let's append them to the fncache
656 656 tr.addbackup(b'fncache')
657 657 fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
658 658 if self.addls:
659 659 fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
660 660 fp.close()
661 661 self.entries = None
662 662 self.addls = set()
663 663
664 664 def add(self, fn):
665 665 if self.entries is None:
666 666 self._load()
667 667 if fn not in self.entries:
668 668 self.addls.add(fn)
669 669
670 670 def remove(self, fn):
671 671 if self.entries is None:
672 672 self._load()
673 673 if fn in self.addls:
674 674 self.addls.remove(fn)
675 675 return
676 676 try:
677 677 self.entries.remove(fn)
678 678 self._dirty = True
679 679 except KeyError:
680 680 pass
681 681
682 682 def __contains__(self, fn):
683 683 if fn in self.addls:
684 684 return True
685 685 if self.entries is None:
686 686 self._load()
687 687 return fn in self.entries
688 688
689 689 def __iter__(self):
690 690 if self.entries is None:
691 691 self._load()
692 692 return iter(self.entries | self.addls)
693 693
694 694
695 695 class _fncachevfs(vfsmod.proxyvfs):
696 696 def __init__(self, vfs, fnc, encode):
697 697 vfsmod.proxyvfs.__init__(self, vfs)
698 698 self.fncache = fnc
699 699 self.encode = encode
700 700
701 701 def __call__(self, path, mode=b'r', *args, **kw):
702 702 encoded = self.encode(path)
703 703 if mode not in (b'r', b'rb') and (
704 704 path.startswith(b'data/') or path.startswith(b'meta/')
705 705 ):
706 706 # do not trigger a fncache load when adding a file that already is
707 707 # known to exist.
708 708 notload = self.fncache.entries is None and self.vfs.exists(encoded)
709 if notload and b'a' in mode and not self.vfs.stat(encoded).st_size:
709 if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size:
710 710 # when writing to an existing file, if the file has size zero,
711 711 # it should be considered missing. Such zero-size files are
712 712 # the result of truncation when a transaction is aborted.
713 713 notload = False
714 714 if not notload:
715 715 self.fncache.add(path)
716 716 return self.vfs(encoded, mode, *args, **kw)
717 717
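The mode check above (b'r+' replacing b'a') tracks the broader change in this diff to how revlog files are opened. The practical difference: append modes force every write to the end of the file regardless of any seek, while 'r+' honours the cursor position, which seeked rewrites (such as the sidedata index rewrite earlier in this changeset) depend on. A standalone demonstration:

    import os
    import tempfile

    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'wb') as f:
        f.write(b'0123456789')

    with open(path, 'ab') as f:      # append mode: the seek is ignored on write
        f.seek(0)
        f.write(b'XX')
    with open(path, 'rb') as f:
        assert f.read() == b'0123456789XX'

    with open(path, 'r+b') as f:     # read/write: the write lands at the cursor
        f.seek(0)
        f.write(b'XX')
    with open(path, 'rb') as f:
        assert f.read() == b'XX23456789XX'
    os.unlink(path)
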
718 718 def join(self, path):
719 719 if path:
720 720 return self.vfs.join(self.encode(path))
721 721 else:
722 722 return self.vfs.join(path)
723 723
724 724
725 725 class fncachestore(basicstore):
726 726 def __init__(self, path, vfstype, dotencode):
727 727 if dotencode:
728 728 encode = _pathencode
729 729 else:
730 730 encode = _plainhybridencode
731 731 self.encode = encode
732 732 vfs = vfstype(path + b'/store')
733 733 self.path = vfs.base
734 734 self.pathsep = self.path + b'/'
735 735 self.createmode = _calcmode(vfs)
736 736 vfs.createmode = self.createmode
737 737 self.rawvfs = vfs
738 738 fnc = fncache(vfs)
739 739 self.fncache = fnc
740 740 self.vfs = _fncachevfs(vfs, fnc, encode)
741 741 self.opener = self.vfs
742 742
743 743 def join(self, f):
744 744 return self.pathsep + self.encode(f)
745 745
746 746 def getsize(self, path):
747 747 return self.rawvfs.stat(path).st_size
748 748
749 749 def datafiles(self, matcher=None):
750 750 for f in sorted(self.fncache):
751 751 if not _matchtrackedpath(f, matcher):
752 752 continue
753 753 ef = self.encode(f)
754 754 try:
755 755 t = revlog_type(f)
756 756 t |= FILEFLAGS_FILELOG
757 757 yield t, f, ef, self.getsize(ef)
758 758 except OSError as err:
759 759 if err.errno != errno.ENOENT:
760 760 raise
761 761
762 762 def copylist(self):
763 763 d = (
764 764 b'bookmarks',
765 765 b'narrowspec',
766 766 b'data',
767 767 b'meta',
768 768 b'dh',
769 769 b'fncache',
770 770 b'phaseroots',
771 771 b'obsstore',
772 772 b'00manifest.d',
773 773 b'00manifest.i',
774 774 b'00changelog.d',
775 775 b'00changelog.i',
776 776 b'requires',
777 777 )
778 778 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]
779 779
780 780 def write(self, tr):
781 781 self.fncache.write(tr)
782 782
783 783 def invalidatecaches(self):
784 784 self.fncache.entries = None
785 785 self.fncache.addls = set()
786 786
787 787 def markremoved(self, fn):
788 788 self.fncache.remove(fn)
789 789
790 790 def _exists(self, f):
791 791 ef = self.encode(f)
792 792 try:
793 793 self.getsize(ef)
794 794 return True
795 795 except OSError as err:
796 796 if err.errno != errno.ENOENT:
797 797 raise
798 798 # nonexistent entry
799 799 return False
800 800
801 801 def __contains__(self, path):
802 802 '''Checks if the store contains path'''
803 803 path = b"/".join((b"data", path))
804 804 # check for files (exact match)
805 805 e = path + b'.i'
806 806 if e in self.fncache and self._exists(e):
807 807 return True
808 808 # now check for directories (prefix match)
809 809 if not path.endswith(b'/'):
810 810 path += b'/'
811 811 for e in self.fncache:
812 812 if e.startswith(path) and self._exists(e):
813 813 return True
814 814 return False