revlog: drop some more error aliases (API)...
Gregory Szorc
r39810:4a2466b2 default
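For extension authors, the upshot of this change: revlog.CensoredNodeError, revlog.ProgrammingError and revlog.AmbiguousPrefixLookupError are no longer re-exported from the revlog module, while revlog.LookupError remains aliased. A minimal, hypothetical sketch of the migration (rl stands for any revlog instance):

    from mercurial import error

    # Before this commit, extensions could spell these as revlog.* aliases;
    # afterwards they must come from the error module directly.
    try:
        node = rl.lookup(prefix)
    except error.AmbiguousPrefixLookupError:
        # prefix matched several nodes (a subclass of error.LookupError)
        pass
    except error.LookupError:
        # no node matched; revlog.LookupError is still a valid alias
        pass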
@@ -1,2498 +1,2496 @@
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import contextlib
18 18 import errno
19 19 import hashlib
20 20 import os
21 21 import re
22 22 import struct
23 23 import zlib
24 24
25 25 # import stuff from node for others to import from revlog
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullhex,
30 30 nullid,
31 31 nullrev,
32 32 wdirfilenodeids,
33 33 wdirhex,
34 34 wdirid,
35 35 wdirrev,
36 36 )
37 37 from .i18n import _
38 38 from .revlogutils.constants import (
39 39 FLAG_GENERALDELTA,
40 40 FLAG_INLINE_DATA,
41 41 REVIDX_DEFAULT_FLAGS,
42 42 REVIDX_ELLIPSIS,
43 43 REVIDX_EXTSTORED,
44 44 REVIDX_FLAGS_ORDER,
45 45 REVIDX_ISCENSORED,
46 46 REVIDX_KNOWN_FLAGS,
47 47 REVIDX_RAWTEXT_CHANGING_FLAGS,
48 48 REVLOGV0,
49 49 REVLOGV1,
50 50 REVLOGV1_FLAGS,
51 51 REVLOGV2,
52 52 REVLOGV2_FLAGS,
53 53 REVLOG_DEFAULT_FLAGS,
54 54 REVLOG_DEFAULT_FORMAT,
55 55 REVLOG_DEFAULT_VERSION,
56 56 )
57 57 from .thirdparty import (
58 58 attr,
59 59 )
60 60 from . import (
61 61 ancestor,
62 62 error,
63 63 mdiff,
64 64 policy,
65 65 pycompat,
66 66 repository,
67 67 templatefilters,
68 68 util,
69 69 )
70 70 from .revlogutils import (
71 71 deltas as deltautil,
72 72 )
73 73 from .utils import (
74 74 interfaceutil,
75 75 stringutil,
76 76 )
77 77
78 78 # blanked usage of all the names to prevent pyflakes constraints
79 79 # We need these names available in the module for extensions.
80 80 REVLOGV0
81 81 REVLOGV1
82 82 REVLOGV2
83 83 FLAG_INLINE_DATA
84 84 FLAG_GENERALDELTA
85 85 REVLOG_DEFAULT_FLAGS
86 86 REVLOG_DEFAULT_FORMAT
87 87 REVLOG_DEFAULT_VERSION
88 88 REVLOGV1_FLAGS
89 89 REVLOGV2_FLAGS
90 90 REVIDX_ISCENSORED
91 91 REVIDX_ELLIPSIS
92 92 REVIDX_EXTSTORED
93 93 REVIDX_DEFAULT_FLAGS
94 94 REVIDX_FLAGS_ORDER
95 95 REVIDX_KNOWN_FLAGS
96 96 REVIDX_RAWTEXT_CHANGING_FLAGS
97 97
98 98 parsers = policy.importmod(r'parsers')
99 99
100 100 # Aliased for performance.
101 101 _zlibdecompress = zlib.decompress
102 102
103 103 # max size of revlog with inline data
104 104 _maxinline = 131072
105 105 _chunksize = 1048576
106 106
107 107 LookupError = error.LookupError
108 AmbiguousPrefixLookupError = error.AmbiguousPrefixLookupError
109 CensoredNodeError = error.CensoredNodeError
110 ProgrammingError = error.ProgrammingError
111 108
112 109 # Store flag processors (cf. 'addflagprocessor()' to register)
113 110 _flagprocessors = {
114 111 REVIDX_ISCENSORED: None,
115 112 }
116 113
117 114 # Flag processors for REVIDX_ELLIPSIS.
118 115 def ellipsisreadprocessor(rl, text):
119 116 return text, False
120 117
121 118 def ellipsiswriteprocessor(rl, text):
122 119 return text, False
123 120
124 121 def ellipsisrawprocessor(rl, text):
125 122 return False
126 123
127 124 ellipsisprocessor = (
128 125 ellipsisreadprocessor,
129 126 ellipsiswriteprocessor,
130 127 ellipsisrawprocessor,
131 128 )
132 129
133 130 _mdre = re.compile('\1\n')
134 131 def parsemeta(text):
135 132 """return (metadatadict, metadatasize)"""
136 133 # text can be buffer, so we can't use .startswith or .index
137 134 if text[:2] != '\1\n':
138 135 return None, None
139 136 s = _mdre.search(text, 2).start()
140 137 mtext = text[2:s]
141 138 meta = {}
142 139 for l in mtext.splitlines():
143 140 k, v = l.split(": ", 1)
144 141 meta[k] = v
145 142 return meta, (s + 2)
146 143
147 144 def packmeta(meta, text):
148 145 keys = sorted(meta)
149 146 metatext = "".join("%s: %s\n" % (k, meta[k]) for k in keys)
150 147 return "\1\n%s\1\n%s" % (metatext, text)
151 148
152 149 def _censoredtext(text):
153 150 m, offs = parsemeta(text)
154 151 return m and "censored" in m
155 152
156 153 def addflagprocessor(flag, processor):
157 154 """Register a flag processor on a revision data flag.
158 155
159 156 Invariant:
160 157 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
161 158 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
162 159 - Only one flag processor can be registered on a specific flag.
163 160 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
164 161 following signatures:
165 162 - (read) f(self, rawtext) -> text, bool
166 163 - (write) f(self, text) -> rawtext, bool
167 164 - (raw) f(self, rawtext) -> bool
168 165 "text" is presented to the user. "rawtext" is stored in revlog data, not
169 166 directly visible to the user.
170 167 The boolean returned by these transforms is used to determine whether
171 168 the returned text can be used for hash integrity checking. For example,
172 169 if "write" returns False, then "text" is used to generate hash. If
173 170 "write" returns True, that basically means "rawtext" returned by "write"
174 171 should be used to generate hash. Usually, "write" and "read" return
175 172 different booleans. And "raw" returns the same boolean as "write".
176 173
177 174 Note: The 'raw' transform is used for changegroup generation and in some
178 175 debug commands. In this case the transform only indicates whether the
179 176 contents can be used for hash integrity checks.
180 177 """
181 178 if not flag & REVIDX_KNOWN_FLAGS:
182 179 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
183 raise ProgrammingError(msg)
180 raise error.ProgrammingError(msg)
184 181 if flag not in REVIDX_FLAGS_ORDER:
185 182 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
186 raise ProgrammingError(msg)
183 raise error.ProgrammingError(msg)
187 184 if flag in _flagprocessors:
188 185 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
189 186 raise error.Abort(msg)
190 187 _flagprocessors[flag] = processor
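As an illustration of the (read, write, raw) contract documented above, a hypothetical transparent-compression processor; the function names and the choice of REVIDX_EXTSTORED are placeholders, not part of this change:

    import zlib

    def _compressread(self, rawtext):
        # stored form -> user-visible text; True: result is usable for hashing
        return zlib.decompress(rawtext), True

    def _compresswrite(self, text):
        # user text -> stored form; False: hash the input "text", not rawtext
        return zlib.compress(text), False

    def _compressraw(self, rawtext):
        # same boolean as "write": rawtext alone cannot be hash-checked
        return False

    # aborts if another processor already claimed the flag
    addflagprocessor(REVIDX_EXTSTORED,
                     (_compressread, _compresswrite, _compressraw))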
191 188
192 189 def getoffset(q):
193 190 return int(q >> 16)
194 191
195 192 def gettype(q):
196 193 return int(q & 0xFFFF)
197 194
198 195 def offset_type(offset, type):
199 196 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
200 197 raise ValueError('unknown revlog index flags')
201 198 return int(int(offset) << 16 | type)
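A quick round trip of the packing above, with illustrative values: the low 16 bits of an index entry's first field carry the flags, the remaining bits the byte offset.

    packed = offset_type(4096, REVIDX_ISCENSORED)
    assert getoffset(packed) == 4096
    assert gettype(packed) == REVIDX_ISCENSORED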
202 199
203 200 _nullhash = hashlib.sha1(nullid)
204 201
205 202 def hash(text, p1, p2):
206 203 """generate a hash from the given text and its parent hashes
207 204
208 205 This hash combines both the current file contents and its history
209 206 in a manner that makes it easy to distinguish nodes with the same
210 207 content in the revision graph.
211 208 """
212 209 # As of now, if one of the parent nodes is null, p2 is null
213 210 if p2 == nullid:
214 211 # deep copy of a hash is faster than creating one
215 212 s = _nullhash.copy()
216 213 s.update(p1)
217 214 else:
218 215 # none of the parent nodes are nullid
219 216 if p1 < p2:
220 217 a = p1
221 218 b = p2
222 219 else:
223 220 a = p2
224 221 b = p1
225 222 s = hashlib.sha1(a)
226 223 s.update(b)
227 224 s.update(text)
228 225 return s.digest()
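Since nullid (twenty zero bytes) sorts below any real node, both branches digest min(p1, p2) + max(p1, p2) + text; the null-parent branch is only a cached-copy fast path. An unoptimized reference version for comparison:

    import hashlib

    def hash_reference(text, p1, p2):
        a, b = sorted((p1, p2))
        return hashlib.sha1(a + b + text).digest()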
229 226
230 227 @attr.s(slots=True, frozen=True)
231 228 class _revisioninfo(object):
232 229 """Information about a revision that allows building its fulltext
233 230 node: expected hash of the revision
234 231 p1, p2: parent revs of the revision
235 232 btext: built text cache consisting of a one-element list
236 233 cachedelta: (baserev, uncompressed_delta) or None
237 234 flags: flags associated to the revision storage
238 235
239 236 One of btext[0] or cachedelta must be set.
240 237 """
241 238 node = attr.ib()
242 239 p1 = attr.ib()
243 240 p2 = attr.ib()
244 241 btext = attr.ib()
245 242 textlen = attr.ib()
246 243 cachedelta = attr.ib()
247 244 flags = attr.ib()
248 245
249 246 @interfaceutil.implementer(repository.irevisiondelta)
250 247 @attr.s(slots=True, frozen=True)
251 248 class revlogrevisiondelta(object):
252 249 node = attr.ib()
253 250 p1node = attr.ib()
254 251 p2node = attr.ib()
255 252 basenode = attr.ib()
256 253 linknode = attr.ib()
257 254 flags = attr.ib()
258 255 baserevisionsize = attr.ib()
259 256 revision = attr.ib()
260 257 delta = attr.ib()
261 258
262 259 # index v0:
263 260 # 4 bytes: offset
264 261 # 4 bytes: compressed length
265 262 # 4 bytes: base rev
266 263 # 4 bytes: link rev
267 264 # 20 bytes: parent 1 nodeid
268 265 # 20 bytes: parent 2 nodeid
269 266 # 20 bytes: nodeid
270 267 indexformatv0 = struct.Struct(">4l20s20s20s")
271 268 indexformatv0_pack = indexformatv0.pack
272 269 indexformatv0_unpack = indexformatv0.unpack
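Each v0 entry is therefore 76 bytes: four big-endian 32-bit integers plus three 20-byte nodeids. A sketch with placeholder values:

    entry = indexformatv0_pack(0, 120, 0, 0,
                               b'\0' * 20, b'\0' * 20, b'\x11' * 20)
    assert len(entry) == indexformatv0.size == 76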
273 270
274 271 class revlogoldindex(list):
275 272 def __getitem__(self, i):
276 273 if i == -1:
277 274 return (0, 0, 0, -1, -1, -1, -1, nullid)
278 275 return list.__getitem__(self, i)
279 276
280 277 class revlogoldio(object):
281 278 def __init__(self):
282 279 self.size = indexformatv0.size
283 280
284 281 def parseindex(self, data, inline):
285 282 s = self.size
286 283 index = []
287 284 nodemap = {nullid: nullrev}
288 285 n = off = 0
289 286 l = len(data)
290 287 while off + s <= l:
291 288 cur = data[off:off + s]
292 289 off += s
293 290 e = indexformatv0_unpack(cur)
294 291 # transform to revlogv1 format
295 292 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
296 293 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
297 294 index.append(e2)
298 295 nodemap[e[6]] = n
299 296 n += 1
300 297
301 298 return revlogoldindex(index), nodemap, None
302 299
303 300 def packentry(self, entry, node, version, rev):
304 301 if gettype(entry[0]):
305 302 raise error.RevlogError(_('index entry flags need revlog '
306 303 'version 1'))
307 304 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
308 305 node(entry[5]), node(entry[6]), entry[7])
309 306 return indexformatv0_pack(*e2)
310 307
311 308 # index ng:
312 309 # 6 bytes: offset
313 310 # 2 bytes: flags
314 311 # 4 bytes: compressed length
315 312 # 4 bytes: uncompressed length
316 313 # 4 bytes: base rev
317 314 # 4 bytes: link rev
318 315 # 4 bytes: parent 1 rev
319 316 # 4 bytes: parent 2 rev
320 317 # 32 bytes: nodeid
321 318 indexformatng = struct.Struct(">Qiiiiii20s12x")
322 319 indexformatng_pack = indexformatng.pack
323 320 versionformat = struct.Struct(">I")
324 321 versionformat_pack = versionformat.pack
325 322 versionformat_unpack = versionformat.unpack
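The 4-byte version word shares space with entry 0's offset field: packentry() below overlays it when writing rev 0, and __init__ reads it back from the first four bytes of the index. A round trip:

    v = REVLOGV1 | FLAG_INLINE_DATA | FLAG_GENERALDELTA
    assert versionformat_unpack(versionformat_pack(v))[0] == v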
326 323
327 324 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
328 325 # signed integer)
329 326 _maxentrysize = 0x7fffffff
330 327
331 328 class revlogio(object):
332 329 def __init__(self):
333 330 self.size = indexformatng.size
334 331
335 332 def parseindex(self, data, inline):
336 333 # call the C implementation to parse the index data
337 334 index, cache = parsers.parse_index2(data, inline)
338 335 return index, getattr(index, 'nodemap', None), cache
339 336
340 337 def packentry(self, entry, node, version, rev):
341 338 p = indexformatng_pack(*entry)
342 339 if rev == 0:
343 340 p = versionformat_pack(version) + p[4:]
344 341 return p
345 342
346 343 class revlog(object):
347 344 """
348 345 the underlying revision storage object
349 346
350 347 A revlog consists of two parts, an index and the revision data.
351 348
352 349 The index is a file with a fixed record size containing
353 350 information on each revision, including its nodeid (hash), the
354 351 nodeids of its parents, the position and offset of its data within
355 352 the data file, and the revision it's based on. Finally, each entry
356 353 contains a linkrev entry that can serve as a pointer to external
357 354 data.
358 355
359 356 The revision data itself is a linear collection of data chunks.
360 357 Each chunk represents a revision and is usually represented as a
361 358 delta against the previous chunk. To bound lookup time, runs of
362 359 deltas are limited to about 2 times the length of the original
363 360 version data. This makes retrieval of a version proportional to
364 361 its size, or O(1) relative to the number of revisions.
365 362
366 363 Both pieces of the revlog are written to in an append-only
367 364 fashion, which means we never need to rewrite a file to insert or
368 365 remove data, and can use some simple techniques to avoid the need
369 366 for locking while reading.
370 367
371 368 If checkambig, indexfile is opened with checkambig=True at
372 369 writing, to avoid file stat ambiguity.
373 370
374 371 If mmaplargeindex is True, and an mmapindexthreshold is set, the
375 372 index will be mmapped rather than read if it is larger than the
376 373 configured threshold.
377 374
378 375 If censorable is True, the revlog can have censored revisions.
379 376 """
380 377 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
381 378 mmaplargeindex=False, censorable=False):
382 379 """
383 380 create a revlog object
384 381
385 382 opener is a function that abstracts the file opening operation
386 383 and can be used to implement COW semantics or the like.
387 384 """
388 385 self.indexfile = indexfile
389 386 self.datafile = datafile or (indexfile[:-2] + ".d")
390 387 self.opener = opener
391 388 # When True, indexfile is opened with checkambig=True at writing, to
392 389 # avoid file stat ambiguity.
393 390 self._checkambig = checkambig
394 391 self._censorable = censorable
395 392 # 3-tuple of (node, rev, text) for a raw revision.
396 393 self._cache = None
397 394 # Maps rev to chain base rev.
398 395 self._chainbasecache = util.lrucachedict(100)
399 396 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
400 397 self._chunkcache = (0, '')
401 398 # How much data to read and cache into the raw revlog data cache.
402 399 self._chunkcachesize = 65536
403 400 self._maxchainlen = None
404 401 self._deltabothparents = True
405 402 self.index = []
406 403 # Mapping of partial identifiers to full nodes.
407 404 self._pcache = {}
408 405 # Mapping of revision integer to full node.
409 406 self._nodecache = {nullid: nullrev}
410 407 self._nodepos = None
411 408 self._compengine = 'zlib'
412 409 self._maxdeltachainspan = -1
413 410 self._withsparseread = False
414 411 self._sparserevlog = False
415 412 self._srdensitythreshold = 0.50
416 413 self._srmingapsize = 262144
417 414
418 415 # Make copy of flag processors so each revlog instance can support
419 416 # custom flags.
420 417 self._flagprocessors = dict(_flagprocessors)
421 418
422 419 mmapindexthreshold = None
423 420 v = REVLOG_DEFAULT_VERSION
424 421 opts = getattr(opener, 'options', None)
425 422 if opts is not None:
426 423 if 'revlogv2' in opts:
427 424 # version 2 revlogs always use generaldelta.
428 425 v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
429 426 elif 'revlogv1' in opts:
430 427 if 'generaldelta' in opts:
431 428 v |= FLAG_GENERALDELTA
432 429 else:
433 430 v = 0
434 431 if 'chunkcachesize' in opts:
435 432 self._chunkcachesize = opts['chunkcachesize']
436 433 if 'maxchainlen' in opts:
437 434 self._maxchainlen = opts['maxchainlen']
438 435 if 'deltabothparents' in opts:
439 436 self._deltabothparents = opts['deltabothparents']
440 437 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
441 438 if 'compengine' in opts:
442 439 self._compengine = opts['compengine']
443 440 if 'maxdeltachainspan' in opts:
444 441 self._maxdeltachainspan = opts['maxdeltachainspan']
445 442 if mmaplargeindex and 'mmapindexthreshold' in opts:
446 443 mmapindexthreshold = opts['mmapindexthreshold']
447 444 self._sparserevlog = bool(opts.get('sparse-revlog', False))
448 445 withsparseread = bool(opts.get('with-sparse-read', False))
449 446 # sparse-revlog forces sparse-read
450 447 self._withsparseread = self._sparserevlog or withsparseread
451 448 if 'sparse-read-density-threshold' in opts:
452 449 self._srdensitythreshold = opts['sparse-read-density-threshold']
453 450 if 'sparse-read-min-gap-size' in opts:
454 451 self._srmingapsize = opts['sparse-read-min-gap-size']
455 452 if opts.get('enableellipsis'):
456 453 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
457 454
458 455 if self._chunkcachesize <= 0:
459 456 raise error.RevlogError(_('revlog chunk cache size %r is not '
460 457 'greater than 0') % self._chunkcachesize)
461 458 elif self._chunkcachesize & (self._chunkcachesize - 1):
462 459 raise error.RevlogError(_('revlog chunk cache size %r is not a '
463 460 'power of 2') % self._chunkcachesize)
464 461
465 462 indexdata = ''
466 463 self._initempty = True
467 464 try:
468 465 with self._indexfp() as f:
469 466 if (mmapindexthreshold is not None and
470 467 self.opener.fstat(f).st_size >= mmapindexthreshold):
471 468 indexdata = util.buffer(util.mmapread(f))
472 469 else:
473 470 indexdata = f.read()
474 471 if len(indexdata) > 0:
475 472 v = versionformat_unpack(indexdata[:4])[0]
476 473 self._initempty = False
477 474 except IOError as inst:
478 475 if inst.errno != errno.ENOENT:
479 476 raise
480 477
481 478 self.version = v
482 479 self._inline = v & FLAG_INLINE_DATA
483 480 self._generaldelta = v & FLAG_GENERALDELTA
484 481 flags = v & ~0xFFFF
485 482 fmt = v & 0xFFFF
486 483 if fmt == REVLOGV0:
487 484 if flags:
488 485 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
489 486 'revlog %s') %
490 487 (flags >> 16, fmt, self.indexfile))
491 488 elif fmt == REVLOGV1:
492 489 if flags & ~REVLOGV1_FLAGS:
493 490 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
494 491 'revlog %s') %
495 492 (flags >> 16, fmt, self.indexfile))
496 493 elif fmt == REVLOGV2:
497 494 if flags & ~REVLOGV2_FLAGS:
498 495 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
499 496 'revlog %s') %
500 497 (flags >> 16, fmt, self.indexfile))
501 498 else:
502 499 raise error.RevlogError(_('unknown version (%d) in revlog %s') %
503 500 (fmt, self.indexfile))
504 501
505 502 self._storedeltachains = True
506 503
507 504 self._io = revlogio()
508 505 if self.version == REVLOGV0:
509 506 self._io = revlogoldio()
510 507 try:
511 508 d = self._io.parseindex(indexdata, self._inline)
512 509 except (ValueError, IndexError):
513 510 raise error.RevlogError(_("index %s is corrupted") %
514 511 self.indexfile)
515 512 self.index, nodemap, self._chunkcache = d
516 513 if nodemap is not None:
517 514 self.nodemap = self._nodecache = nodemap
518 515 if not self._chunkcache:
519 516 self._chunkclear()
520 517 # revnum -> (chain-length, sum-delta-length)
521 518 self._chaininfocache = {}
522 519 # revlog header -> revlog compressor
523 520 self._decompressors = {}
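All of the tuning knobs parsed above arrive through opener.options; a hypothetical configuration exercising several of them:

    opener.options = {
        'revlogv1': True,
        'generaldelta': True,
        'sparse-revlog': True,        # also forces sparse reads
        'chunkcachesize': 65536,      # must be a power of 2 and > 0
        'maxchainlen': 1000,
    }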
524 521
525 522 @util.propertycache
526 523 def _compressor(self):
527 524 return util.compengines[self._compengine].revlogcompressor()
528 525
529 526 def _indexfp(self, mode='r'):
530 527 """file object for the revlog's index file"""
531 528 args = {r'mode': mode}
532 529 if mode != 'r':
533 530 args[r'checkambig'] = self._checkambig
534 531 if mode == 'w':
535 532 args[r'atomictemp'] = True
536 533 return self.opener(self.indexfile, **args)
537 534
538 535 def _datafp(self, mode='r'):
539 536 """file object for the revlog's data file"""
540 537 return self.opener(self.datafile, mode=mode)
541 538
542 539 @contextlib.contextmanager
543 540 def _datareadfp(self, existingfp=None):
544 541 """file object suitable to read data"""
545 542 if existingfp is not None:
546 543 yield existingfp
547 544 else:
548 545 if self._inline:
549 546 func = self._indexfp
550 547 else:
551 548 func = self._datafp
552 549 with func() as fp:
553 550 yield fp
554 551
555 552 def tip(self):
556 553 return self.node(len(self.index) - 1)
557 554 def __contains__(self, rev):
558 555 return 0 <= rev < len(self)
559 556 def __len__(self):
560 557 return len(self.index)
561 558 def __iter__(self):
562 559 return iter(pycompat.xrange(len(self)))
563 560 def revs(self, start=0, stop=None):
564 561 """iterate over all rev in this revlog (from start to stop)"""
565 562 step = 1
566 563 length = len(self)
567 564 if stop is not None:
568 565 if start > stop:
569 566 step = -1
570 567 stop += step
571 568 if stop > length:
572 569 stop = length
573 570 else:
574 571 stop = length
575 572 return pycompat.xrange(start, stop, step)
576 573
577 574 @util.propertycache
578 575 def nodemap(self):
579 576 if self.index:
580 577 # populate mapping down to the initial node
581 578 node0 = self.index[0][7] # get around changelog filtering
582 579 self.rev(node0)
583 580 return self._nodecache
584 581
585 582 def hasnode(self, node):
586 583 try:
587 584 self.rev(node)
588 585 return True
589 586 except KeyError:
590 587 return False
591 588
592 589 def candelta(self, baserev, rev):
593 590 """whether two revisions (baserev, rev) can be delta-ed or not"""
594 591 # Disable delta if either rev requires a content-changing flag
595 592 # processor (ex. LFS). This is because such a flag processor can alter
596 593 # the rawtext content that the delta will be based on, and two clients
597 594 # could have the same revlog node with different flags (i.e. different
598 595 # rawtext contents) and the delta could be incompatible.
599 596 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
600 597 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
601 598 return False
602 599 return True
603 600
604 601 def clearcaches(self):
605 602 self._cache = None
606 603 self._chainbasecache.clear()
607 604 self._chunkcache = (0, '')
608 605 self._pcache = {}
609 606
610 607 try:
611 608 self._nodecache.clearcaches()
612 609 except AttributeError:
613 610 self._nodecache = {nullid: nullrev}
614 611 self._nodepos = None
615 612
616 613 def rev(self, node):
617 614 try:
618 615 return self._nodecache[node]
619 616 except TypeError:
620 617 raise
621 618 except error.RevlogError:
622 619 # parsers.c radix tree lookup failed
623 620 if node == wdirid or node in wdirfilenodeids:
624 621 raise error.WdirUnsupported
625 622 raise LookupError(node, self.indexfile, _('no node'))
626 623 except KeyError:
627 624 # pure python cache lookup failed
628 625 n = self._nodecache
629 626 i = self.index
630 627 p = self._nodepos
631 628 if p is None:
632 629 p = len(i) - 1
633 630 else:
634 631 assert p < len(i)
635 632 for r in pycompat.xrange(p, -1, -1):
636 633 v = i[r][7]
637 634 n[v] = r
638 635 if v == node:
639 636 self._nodepos = r - 1
640 637 return r
641 638 if node == wdirid or node in wdirfilenodeids:
642 639 raise error.WdirUnsupported
643 640 raise LookupError(node, self.indexfile, _('no node'))
644 641
645 642 # Accessors for index entries.
646 643
647 644 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
648 645 # are flags.
649 646 def start(self, rev):
650 647 return int(self.index[rev][0] >> 16)
651 648
652 649 def flags(self, rev):
653 650 return self.index[rev][0] & 0xFFFF
654 651
655 652 def length(self, rev):
656 653 return self.index[rev][1]
657 654
658 655 def rawsize(self, rev):
659 656 """return the length of the uncompressed text for a given revision"""
660 657 l = self.index[rev][2]
661 658 if l >= 0:
662 659 return l
663 660
664 661 t = self.revision(rev, raw=True)
665 662 return len(t)
666 663
667 664 def size(self, rev):
668 665 """length of non-raw text (processed by a "read" flag processor)"""
669 666 # fast path: if no "read" flag processor could change the content,
670 667 # size is rawsize. note: ELLIPSIS is known to not change the content.
671 668 flags = self.flags(rev)
672 669 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
673 670 return self.rawsize(rev)
674 671
675 672 return len(self.revision(rev, raw=False))
676 673
677 674 def chainbase(self, rev):
678 675 base = self._chainbasecache.get(rev)
679 676 if base is not None:
680 677 return base
681 678
682 679 index = self.index
683 680 iterrev = rev
684 681 base = index[iterrev][3]
685 682 while base != iterrev:
686 683 iterrev = base
687 684 base = index[iterrev][3]
688 685
689 686 self._chainbasecache[rev] = base
690 687 return base
691 688
692 689 def linkrev(self, rev):
693 690 return self.index[rev][4]
694 691
695 692 def parentrevs(self, rev):
696 693 try:
697 694 entry = self.index[rev]
698 695 except IndexError:
699 696 if rev == wdirrev:
700 697 raise error.WdirUnsupported
701 698 raise
702 699
703 700 return entry[5], entry[6]
704 701
705 702 def node(self, rev):
706 703 try:
707 704 return self.index[rev][7]
708 705 except IndexError:
709 706 if rev == wdirrev:
710 707 raise error.WdirUnsupported
711 708 raise
712 709
713 710 # Derived from index values.
714 711
715 712 def end(self, rev):
716 713 return self.start(rev) + self.length(rev)
717 714
718 715 def parents(self, node):
719 716 i = self.index
720 717 d = i[self.rev(node)]
721 718 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
722 719
723 720 def chainlen(self, rev):
724 721 return self._chaininfo(rev)[0]
725 722
726 723 def _chaininfo(self, rev):
727 724 chaininfocache = self._chaininfocache
728 725 if rev in chaininfocache:
729 726 return chaininfocache[rev]
730 727 index = self.index
731 728 generaldelta = self._generaldelta
732 729 iterrev = rev
733 730 e = index[iterrev]
734 731 clen = 0
735 732 compresseddeltalen = 0
736 733 while iterrev != e[3]:
737 734 clen += 1
738 735 compresseddeltalen += e[1]
739 736 if generaldelta:
740 737 iterrev = e[3]
741 738 else:
742 739 iterrev -= 1
743 740 if iterrev in chaininfocache:
744 741 t = chaininfocache[iterrev]
745 742 clen += t[0]
746 743 compresseddeltalen += t[1]
747 744 break
748 745 e = index[iterrev]
749 746 else:
750 747 # Add text length of base since decompressing that also takes
751 748 # work. For cache hits the length is already included.
752 749 compresseddeltalen += e[1]
753 750 r = (clen, compresseddeltalen)
754 751 chaininfocache[rev] = r
755 752 return r
756 753
757 754 def _deltachain(self, rev, stoprev=None):
758 755 """Obtain the delta chain for a revision.
759 756
760 757 ``stoprev`` specifies a revision to stop at. If not specified, we
761 758 stop at the base of the chain.
762 759
763 760 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
764 761 revs in ascending order and ``stopped`` is a bool indicating whether
765 762 ``stoprev`` was hit.
766 763 """
767 764 # Try C implementation.
768 765 try:
769 766 return self.index.deltachain(rev, stoprev, self._generaldelta)
770 767 except AttributeError:
771 768 pass
772 769
773 770 chain = []
774 771
775 772 # Alias to prevent attribute lookup in tight loop.
776 773 index = self.index
777 774 generaldelta = self._generaldelta
778 775
779 776 iterrev = rev
780 777 e = index[iterrev]
781 778 while iterrev != e[3] and iterrev != stoprev:
782 779 chain.append(iterrev)
783 780 if generaldelta:
784 781 iterrev = e[3]
785 782 else:
786 783 iterrev -= 1
787 784 e = index[iterrev]
788 785
789 786 if iterrev == stoprev:
790 787 stopped = True
791 788 else:
792 789 chain.append(iterrev)
793 790 stopped = False
794 791
795 792 chain.reverse()
796 793 return chain, stopped
797 794
798 795 def ancestors(self, revs, stoprev=0, inclusive=False):
799 796 """Generate the ancestors of 'revs' in reverse topological order.
800 797 Does not generate revs lower than stoprev.
801 798
802 799 See the documentation for ancestor.lazyancestors for more details."""
803 800
804 801 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
805 802 inclusive=inclusive)
806 803
807 804 def descendants(self, revs):
808 805 """Generate the descendants of 'revs' in revision order.
809 806
810 807 Yield a sequence of revision numbers starting with a child of
811 808 some rev in revs, i.e., each revision is *not* considered a
812 809 descendant of itself. Results are ordered by revision number (a
813 810 topological sort)."""
814 811 first = min(revs)
815 812 if first == nullrev:
816 813 for i in self:
817 814 yield i
818 815 return
819 816
820 817 seen = set(revs)
821 818 for i in self.revs(start=first + 1):
822 819 for x in self.parentrevs(i):
823 820 if x != nullrev and x in seen:
824 821 seen.add(i)
825 822 yield i
826 823 break
827 824
828 825 def findcommonmissing(self, common=None, heads=None):
829 826 """Return a tuple of the ancestors of common and the ancestors of heads
830 827 that are not ancestors of common. In revset terminology, we return the
831 828 tuple:
832 829
833 830 ::common, (::heads) - (::common)
834 831
835 832 The list is sorted by revision number, meaning it is
836 833 topologically sorted.
837 834
838 835 'heads' and 'common' are both lists of node IDs. If heads is
839 836 not supplied, uses all of the revlog's heads. If common is not
840 837 supplied, uses nullid."""
841 838 if common is None:
842 839 common = [nullid]
843 840 if heads is None:
844 841 heads = self.heads()
845 842
846 843 common = [self.rev(n) for n in common]
847 844 heads = [self.rev(n) for n in heads]
848 845
849 846 # we want the ancestors, but inclusive
850 847 class lazyset(object):
851 848 def __init__(self, lazyvalues):
852 849 self.addedvalues = set()
853 850 self.lazyvalues = lazyvalues
854 851
855 852 def __contains__(self, value):
856 853 return value in self.addedvalues or value in self.lazyvalues
857 854
858 855 def __iter__(self):
859 856 added = self.addedvalues
860 857 for r in added:
861 858 yield r
862 859 for r in self.lazyvalues:
863 860 if r not in added:
864 861 yield r
865 862
866 863 def add(self, value):
867 864 self.addedvalues.add(value)
868 865
869 866 def update(self, values):
870 867 self.addedvalues.update(values)
871 868
872 869 has = lazyset(self.ancestors(common))
873 870 has.add(nullrev)
874 871 has.update(common)
875 872
876 873 # take all ancestors from heads that aren't in has
877 874 missing = set()
878 875 visit = collections.deque(r for r in heads if r not in has)
879 876 while visit:
880 877 r = visit.popleft()
881 878 if r in missing:
882 879 continue
883 880 else:
884 881 missing.add(r)
885 882 for p in self.parentrevs(r):
886 883 if p not in has:
887 884 visit.append(p)
888 885 missing = list(missing)
889 886 missing.sort()
890 887 return has, [self.node(miss) for miss in missing]
891 888
892 889 def incrementalmissingrevs(self, common=None):
893 890 """Return an object that can be used to incrementally compute the
894 891 revision numbers of the ancestors of arbitrary sets that are not
895 892 ancestors of common. This is an ancestor.incrementalmissingancestors
896 893 object.
897 894
898 895 'common' is a list of revision numbers. If common is not supplied, uses
899 896 nullrev.
900 897 """
901 898 if common is None:
902 899 common = [nullrev]
903 900
904 901 return ancestor.incrementalmissingancestors(self.parentrevs, common)
905 902
906 903 def findmissingrevs(self, common=None, heads=None):
907 904 """Return the revision numbers of the ancestors of heads that
908 905 are not ancestors of common.
909 906
910 907 More specifically, return a list of revision numbers corresponding to
911 908 nodes N such that every N satisfies the following constraints:
912 909
913 910 1. N is an ancestor of some node in 'heads'
914 911 2. N is not an ancestor of any node in 'common'
915 912
916 913 The list is sorted by revision number, meaning it is
917 914 topologically sorted.
918 915
919 916 'heads' and 'common' are both lists of revision numbers. If heads is
920 917 not supplied, uses all of the revlog's heads. If common is not
921 918 supplied, uses nullrev."""
922 919 if common is None:
923 920 common = [nullrev]
924 921 if heads is None:
925 922 heads = self.headrevs()
926 923
927 924 inc = self.incrementalmissingrevs(common=common)
928 925 return inc.missingancestors(heads)
929 926
930 927 def findmissing(self, common=None, heads=None):
931 928 """Return the ancestors of heads that are not ancestors of common.
932 929
933 930 More specifically, return a list of nodes N such that every N
934 931 satisfies the following constraints:
935 932
936 933 1. N is an ancestor of some node in 'heads'
937 934 2. N is not an ancestor of any node in 'common'
938 935
939 936 The list is sorted by revision number, meaning it is
940 937 topologically sorted.
941 938
942 939 'heads' and 'common' are both lists of node IDs. If heads is
943 940 not supplied, uses all of the revlog's heads. If common is not
944 941 supplied, uses nullid."""
945 942 if common is None:
946 943 common = [nullid]
947 944 if heads is None:
948 945 heads = self.heads()
949 946
950 947 common = [self.rev(n) for n in common]
951 948 heads = [self.rev(n) for n in heads]
952 949
953 950 inc = self.incrementalmissingrevs(common=common)
954 951 return [self.node(r) for r in inc.missingancestors(heads)]
955 952
956 953 def nodesbetween(self, roots=None, heads=None):
957 954 """Return a topological path from 'roots' to 'heads'.
958 955
959 956 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
960 957 topologically sorted list of all nodes N that satisfy both of
961 958 these constraints:
962 959
963 960 1. N is a descendant of some node in 'roots'
964 961 2. N is an ancestor of some node in 'heads'
965 962
966 963 Every node is considered to be both a descendant and an ancestor
967 964 of itself, so every reachable node in 'roots' and 'heads' will be
968 965 included in 'nodes'.
969 966
970 967 'outroots' is the list of reachable nodes in 'roots', i.e., the
971 968 subset of 'roots' that is returned in 'nodes'. Likewise,
972 969 'outheads' is the subset of 'heads' that is also in 'nodes'.
973 970
974 971 'roots' and 'heads' are both lists of node IDs. If 'roots' is
975 972 unspecified, uses nullid as the only root. If 'heads' is
976 973 unspecified, uses list of all of the revlog's heads."""
977 974 nonodes = ([], [], [])
978 975 if roots is not None:
979 976 roots = list(roots)
980 977 if not roots:
981 978 return nonodes
982 979 lowestrev = min([self.rev(n) for n in roots])
983 980 else:
984 981 roots = [nullid] # Everybody's a descendant of nullid
985 982 lowestrev = nullrev
986 983 if (lowestrev == nullrev) and (heads is None):
987 984 # We want _all_ the nodes!
988 985 return ([self.node(r) for r in self], [nullid], list(self.heads()))
989 986 if heads is None:
990 987 # All nodes are ancestors, so the latest ancestor is the last
991 988 # node.
992 989 highestrev = len(self) - 1
993 990 # Set ancestors to None to signal that every node is an ancestor.
994 991 ancestors = None
995 992 # Set heads to an empty dictionary for later discovery of heads
996 993 heads = {}
997 994 else:
998 995 heads = list(heads)
999 996 if not heads:
1000 997 return nonodes
1001 998 ancestors = set()
1002 999 # Turn heads into a dictionary so we can remove 'fake' heads.
1003 1000 # Also, later we will be using it to filter out the heads we can't
1004 1001 # find from roots.
1005 1002 heads = dict.fromkeys(heads, False)
1006 1003 # Start at the top and keep marking parents until we're done.
1007 1004 nodestotag = set(heads)
1008 1005 # Remember where the top was so we can use it as a limit later.
1009 1006 highestrev = max([self.rev(n) for n in nodestotag])
1010 1007 while nodestotag:
1011 1008 # grab a node to tag
1012 1009 n = nodestotag.pop()
1013 1010 # Never tag nullid
1014 1011 if n == nullid:
1015 1012 continue
1016 1013 # A node's revision number represents its place in a
1017 1014 # topologically sorted list of nodes.
1018 1015 r = self.rev(n)
1019 1016 if r >= lowestrev:
1020 1017 if n not in ancestors:
1021 1018 # If we are possibly a descendant of one of the roots
1022 1019 # and we haven't already been marked as an ancestor
1023 1020 ancestors.add(n) # Mark as ancestor
1024 1021 # Add non-nullid parents to list of nodes to tag.
1025 1022 nodestotag.update([p for p in self.parents(n) if
1026 1023 p != nullid])
1027 1024 elif n in heads: # We've seen it before, is it a fake head?
1028 1025 # So it is, real heads should not be the ancestors of
1029 1026 # any other heads.
1030 1027 heads.pop(n)
1031 1028 if not ancestors:
1032 1029 return nonodes
1033 1030 # Now that we have our set of ancestors, we want to remove any
1034 1031 # roots that are not ancestors.
1035 1032
1036 1033 # If one of the roots was nullid, everything is included anyway.
1037 1034 if lowestrev > nullrev:
1038 1035 # But, since we weren't, let's recompute the lowest rev to not
1039 1036 # include roots that aren't ancestors.
1040 1037
1041 1038 # Filter out roots that aren't ancestors of heads
1042 1039 roots = [root for root in roots if root in ancestors]
1043 1040 # Recompute the lowest revision
1044 1041 if roots:
1045 1042 lowestrev = min([self.rev(root) for root in roots])
1046 1043 else:
1047 1044 # No more roots? Return empty list
1048 1045 return nonodes
1049 1046 else:
1050 1047 # We are descending from nullid, and don't need to care about
1051 1048 # any other roots.
1052 1049 lowestrev = nullrev
1053 1050 roots = [nullid]
1054 1051 # Transform our roots list into a set.
1055 1052 descendants = set(roots)
1056 1053 # Also, keep the original roots so we can filter out roots that aren't
1057 1054 # 'real' roots (i.e. are descended from other roots).
1058 1055 roots = descendants.copy()
1059 1056 # Our topologically sorted list of output nodes.
1060 1057 orderedout = []
1061 1058 # Don't start at nullid since we don't want nullid in our output list,
1062 1059 # and if nullid shows up in descendants, empty parents will look like
1063 1060 # they're descendants.
1064 1061 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1065 1062 n = self.node(r)
1066 1063 isdescendant = False
1067 1064 if lowestrev == nullrev: # Everybody is a descendant of nullid
1068 1065 isdescendant = True
1069 1066 elif n in descendants:
1070 1067 # n is already a descendant
1071 1068 isdescendant = True
1072 1069 # This check only needs to be done here because all the roots
1073 1070 # will start being marked as descendants before the loop.
1074 1071 if n in roots:
1075 1072 # If n was a root, check if it's a 'real' root.
1076 1073 p = tuple(self.parents(n))
1077 1074 # If any of its parents are descendants, it's not a root.
1078 1075 if (p[0] in descendants) or (p[1] in descendants):
1079 1076 roots.remove(n)
1080 1077 else:
1081 1078 p = tuple(self.parents(n))
1082 1079 # A node is a descendant if either of its parents are
1083 1080 # descendants. (We seeded the descendants set with the roots
1084 1081 # up there, remember?)
1085 1082 if (p[0] in descendants) or (p[1] in descendants):
1086 1083 descendants.add(n)
1087 1084 isdescendant = True
1088 1085 if isdescendant and ((ancestors is None) or (n in ancestors)):
1089 1086 # Only include nodes that are both descendants and ancestors.
1090 1087 orderedout.append(n)
1091 1088 if (ancestors is not None) and (n in heads):
1092 1089 # We're trying to figure out which heads are reachable
1093 1090 # from roots.
1094 1091 # Mark this head as having been reached
1095 1092 heads[n] = True
1096 1093 elif ancestors is None:
1097 1094 # Otherwise, we're trying to discover the heads.
1098 1095 # Assume this is a head because if it isn't, the next step
1099 1096 # will eventually remove it.
1100 1097 heads[n] = True
1101 1098 # But, obviously its parents aren't.
1102 1099 for p in self.parents(n):
1103 1100 heads.pop(p, None)
1104 1101 heads = [head for head, flag in heads.iteritems() if flag]
1105 1102 roots = list(roots)
1106 1103 assert orderedout
1107 1104 assert roots
1108 1105 assert heads
1109 1106 return (orderedout, roots, heads)
1110 1107
1111 1108 def headrevs(self):
1112 1109 try:
1113 1110 return self.index.headrevs()
1114 1111 except AttributeError:
1115 1112 return self._headrevs()
1116 1113
1117 1114 def computephases(self, roots):
1118 1115 return self.index.computephasesmapsets(roots)
1119 1116
1120 1117 def _headrevs(self):
1121 1118 count = len(self)
1122 1119 if not count:
1123 1120 return [nullrev]
1124 1121 # we won't iter over filtered rev so nobody is a head at start
1125 1122 ishead = [0] * (count + 1)
1126 1123 index = self.index
1127 1124 for r in self:
1128 1125 ishead[r] = 1 # I may be a head
1129 1126 e = index[r]
1130 1127 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1131 1128 return [r for r, val in enumerate(ishead) if val]
1132 1129
1133 1130 def heads(self, start=None, stop=None):
1134 1131 """return the list of all nodes that have no children
1135 1132
1136 1133 if start is specified, only heads that are descendants of
1137 1134 start will be returned
1138 1135 if stop is specified, it will consider all the revs from stop
1139 1136 as if they had no children
1140 1137 """
1141 1138 if start is None and stop is None:
1142 1139 if not len(self):
1143 1140 return [nullid]
1144 1141 return [self.node(r) for r in self.headrevs()]
1145 1142
1146 1143 if start is None:
1147 1144 start = nullid
1148 1145 if stop is None:
1149 1146 stop = []
1150 1147 stoprevs = set([self.rev(n) for n in stop])
1151 1148 startrev = self.rev(start)
1152 1149 reachable = {startrev}
1153 1150 heads = {startrev}
1154 1151
1155 1152 parentrevs = self.parentrevs
1156 1153 for r in self.revs(start=startrev + 1):
1157 1154 for p in parentrevs(r):
1158 1155 if p in reachable:
1159 1156 if r not in stoprevs:
1160 1157 reachable.add(r)
1161 1158 heads.add(r)
1162 1159 if p in heads and p not in stoprevs:
1163 1160 heads.remove(p)
1164 1161
1165 1162 return [self.node(r) for r in heads]
1166 1163
1167 1164 def children(self, node):
1168 1165 """find the children of a given node"""
1169 1166 c = []
1170 1167 p = self.rev(node)
1171 1168 for r in self.revs(start=p + 1):
1172 1169 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1173 1170 if prevs:
1174 1171 for pr in prevs:
1175 1172 if pr == p:
1176 1173 c.append(self.node(r))
1177 1174 elif p == nullrev:
1178 1175 c.append(self.node(r))
1179 1176 return c
1180 1177
1181 1178 def commonancestorsheads(self, a, b):
1182 1179 """calculate all the heads of the common ancestors of nodes a and b"""
1183 1180 a, b = self.rev(a), self.rev(b)
1184 1181 ancs = self._commonancestorsheads(a, b)
1185 1182 return pycompat.maplist(self.node, ancs)
1186 1183
1187 1184 def _commonancestorsheads(self, *revs):
1188 1185 """calculate all the heads of the common ancestors of revs"""
1189 1186 try:
1190 1187 ancs = self.index.commonancestorsheads(*revs)
1191 1188 except (AttributeError, OverflowError): # C implementation failed
1192 1189 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1193 1190 return ancs
1194 1191
1195 1192 def isancestor(self, a, b):
1196 1193 """return True if node a is an ancestor of node b
1197 1194
1198 1195 A revision is considered an ancestor of itself."""
1199 1196 a, b = self.rev(a), self.rev(b)
1200 1197 return self.isancestorrev(a, b)
1201 1198
1202 1199 def isancestorrev(self, a, b):
1203 1200 """return True if revision a is an ancestor of revision b
1204 1201
1205 1202 A revision is considered an ancestor of itself.
1206 1203
1207 1204 The implementation of this is trivial but the use of
1208 1205 commonancestorsheads is not."""
1209 1206 if a == nullrev:
1210 1207 return True
1211 1208 elif a == b:
1212 1209 return True
1213 1210 elif a > b:
1214 1211 return False
1215 1212 return a in self._commonancestorsheads(a, b)
1216 1213
1217 1214 def ancestor(self, a, b):
1218 1215 """calculate the "best" common ancestor of nodes a and b"""
1219 1216
1220 1217 a, b = self.rev(a), self.rev(b)
1221 1218 try:
1222 1219 ancs = self.index.ancestors(a, b)
1223 1220 except (AttributeError, OverflowError):
1224 1221 ancs = ancestor.ancestors(self.parentrevs, a, b)
1225 1222 if ancs:
1226 1223 # choose a consistent winner when there's a tie
1227 1224 return min(map(self.node, ancs))
1228 1225 return nullid
1229 1226
1230 1227 def _match(self, id):
1231 1228 if isinstance(id, int):
1232 1229 # rev
1233 1230 return self.node(id)
1234 1231 if len(id) == 20:
1235 1232 # possibly a binary node
1236 1233 # odds of a binary node being all hex in ASCII are 1 in 10**25
1237 1234 try:
1238 1235 node = id
1239 1236 self.rev(node) # quick search the index
1240 1237 return node
1241 1238 except LookupError:
1242 1239 pass # may be partial hex id
1243 1240 try:
1244 1241 # str(rev)
1245 1242 rev = int(id)
1246 1243 if "%d" % rev != id:
1247 1244 raise ValueError
1248 1245 if rev < 0:
1249 1246 rev = len(self) + rev
1250 1247 if rev < 0 or rev >= len(self):
1251 1248 raise ValueError
1252 1249 return self.node(rev)
1253 1250 except (ValueError, OverflowError):
1254 1251 pass
1255 1252 if len(id) == 40:
1256 1253 try:
1257 1254 # a full hex nodeid?
1258 1255 node = bin(id)
1259 1256 self.rev(node)
1260 1257 return node
1261 1258 except (TypeError, LookupError):
1262 1259 pass
1263 1260
1264 1261 def _partialmatch(self, id):
1265 1262 # we don't care about wdirfilenodeids as they should always be full hashes
1266 1263 maybewdir = wdirhex.startswith(id)
1267 1264 try:
1268 1265 partial = self.index.partialmatch(id)
1269 1266 if partial and self.hasnode(partial):
1270 1267 if maybewdir:
1271 1268 # single 'ff...' match in radix tree, ambiguous with wdir
1272 1269 raise error.RevlogError
1273 1270 return partial
1274 1271 if maybewdir:
1275 1272 # no 'ff...' match in radix tree, wdir identified
1276 1273 raise error.WdirUnsupported
1277 1274 return None
1278 1275 except error.RevlogError:
1279 1276 # parsers.c radix tree lookup gave multiple matches
1280 1277 # fast path: for unfiltered changelog, radix tree is accurate
1281 1278 if not getattr(self, 'filteredrevs', None):
1282 raise AmbiguousPrefixLookupError(id, self.indexfile,
1283 _('ambiguous identifier'))
1279 raise error.AmbiguousPrefixLookupError(
1280 id, self.indexfile, _('ambiguous identifier'))
1284 1281 # fall through to slow path that filters hidden revisions
1285 1282 except (AttributeError, ValueError):
1286 1283 # we are pure python, or key was too short to search radix tree
1287 1284 pass
1288 1285
1289 1286 if id in self._pcache:
1290 1287 return self._pcache[id]
1291 1288
1292 1289 if len(id) <= 40:
1293 1290 try:
1294 1291 # hex(node)[:...]
1295 1292 l = len(id) // 2 # grab an even number of digits
1296 1293 prefix = bin(id[:l * 2])
1297 1294 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1298 1295 nl = [n for n in nl if hex(n).startswith(id) and
1299 1296 self.hasnode(n)]
1300 1297 if nullhex.startswith(id):
1301 1298 nl.append(nullid)
1302 1299 if len(nl) > 0:
1303 1300 if len(nl) == 1 and not maybewdir:
1304 1301 self._pcache[id] = nl[0]
1305 1302 return nl[0]
1306 raise AmbiguousPrefixLookupError(id, self.indexfile,
1307 _('ambiguous identifier'))
1303 raise error.AmbiguousPrefixLookupError(
1304 id, self.indexfile, _('ambiguous identifier'))
1308 1305 if maybewdir:
1309 1306 raise error.WdirUnsupported
1310 1307 return None
1311 1308 except TypeError:
1312 1309 pass
1313 1310
1314 1311 def lookup(self, id):
1315 1312 """locate a node based on:
1316 1313 - revision number or str(revision number)
1317 1314 - nodeid or subset of hex nodeid
1318 1315 """
1319 1316 n = self._match(id)
1320 1317 if n is not None:
1321 1318 return n
1322 1319 n = self._partialmatch(id)
1323 1320 if n:
1324 1321 return n
1325 1322
1326 1323 raise LookupError(id, self.indexfile, _('no match found'))
1327 1324
1328 1325 def shortest(self, node, minlength=1):
1329 1326 """Find the shortest unambiguous prefix that matches node."""
1330 1327 def isvalid(prefix):
1331 1328 try:
1332 1329 node = self._partialmatch(prefix)
1333 1330 except error.RevlogError:
1334 1331 return False
1335 1332 except error.WdirUnsupported:
1336 1333 # single 'ff...' match
1337 1334 return True
1338 1335 if node is None:
1339 1336 raise LookupError(node, self.indexfile, _('no node'))
1340 1337 return True
1341 1338
1342 1339 def maybewdir(prefix):
1343 1340 return all(c == 'f' for c in prefix)
1344 1341
1345 1342 hexnode = hex(node)
1346 1343
1347 1344 def disambiguate(hexnode, minlength):
1348 1345 """Disambiguate against wdirid."""
1349 1346 for length in range(minlength, 41):
1350 1347 prefix = hexnode[:length]
1351 1348 if not maybewdir(prefix):
1352 1349 return prefix
1353 1350
1354 1351 if not getattr(self, 'filteredrevs', None):
1355 1352 try:
1356 1353 length = max(self.index.shortest(node), minlength)
1357 1354 return disambiguate(hexnode, length)
1358 1355 except error.RevlogError:
1359 1356 if node != wdirid:
1360 1357 raise LookupError(node, self.indexfile, _('no node'))
1361 1358 except AttributeError:
1362 1359 # Fall through to pure code
1363 1360 pass
1364 1361
1365 1362 if node == wdirid:
1366 1363 for length in range(minlength, 41):
1367 1364 prefix = hexnode[:length]
1368 1365 if isvalid(prefix):
1369 1366 return prefix
1370 1367
1371 1368 for length in range(minlength, 41):
1372 1369 prefix = hexnode[:length]
1373 1370 if isvalid(prefix):
1374 1371 return disambiguate(hexnode, length)
1375 1372
1376 1373 def cmp(self, node, text):
1377 1374 """compare text with a given file revision
1378 1375
379 376 returns True if text is different from what is stored.
1380 1377 """
1381 1378 p1, p2 = self.parents(node)
1382 1379 return hash(text, p1, p2) != node
1383 1380
1384 1381 def _cachesegment(self, offset, data):
1385 1382 """Add a segment to the revlog cache.
1386 1383
1387 1384 Accepts an absolute offset and the data that is at that location.
1388 1385 """
1389 1386 o, d = self._chunkcache
1390 1387 # try to add to existing cache
1391 1388 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1392 1389 self._chunkcache = o, d + data
1393 1390 else:
1394 1391 self._chunkcache = offset, data
1395 1392
1396 1393 def _readsegment(self, offset, length, df=None):
1397 1394 """Load a segment of raw data from the revlog.
1398 1395
1399 1396 Accepts an absolute offset, length to read, and an optional existing
1400 1397 file handle to read from.
1401 1398
1402 1399 If an existing file handle is passed, it will be seeked and the
1403 1400 original seek position will NOT be restored.
1404 1401
1405 1402 Returns a str or buffer of raw byte data.
1406 1403 """
1407 1404 # Cache data both forward and backward around the requested
1408 1405 # data, in a fixed size window. This helps speed up operations
1409 1406 # involving reading the revlog backwards.
1410 1407 cachesize = self._chunkcachesize
1411 1408 realoffset = offset & ~(cachesize - 1)
1412 1409 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1413 1410 - realoffset)
1414 1411 with self._datareadfp(df) as df:
1415 1412 df.seek(realoffset)
1416 1413 d = df.read(reallength)
1417 1414 self._cachesegment(realoffset, d)
1418 1415 if offset != realoffset or reallength != length:
1419 1416 return util.buffer(d, offset - realoffset, length)
1420 1417 return d
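Worked example of the alignment arithmetic above, with the default 64 KiB cache window: a 100-byte read at offset 70000 widens to the aligned range [65536, 131072).

    cachesize = 65536
    offset, length = 70000, 100
    realoffset = offset & ~(cachesize - 1)                  # 65536
    reallength = (((offset + length + cachesize)
                   & ~(cachesize - 1)) - realoffset)        # 65536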
1421 1418
1422 1419 def _getsegment(self, offset, length, df=None):
1423 1420 """Obtain a segment of raw data from the revlog.
1424 1421
1425 1422 Accepts an absolute offset, length of bytes to obtain, and an
1426 1423 optional file handle to the already-opened revlog. If the file
1427 1424 handle is used, its original seek position will not be preserved.
1428 1425
1429 1426 Requests for data may be returned from a cache.
1430 1427
1431 1428 Returns a str or a buffer instance of raw byte data.
1432 1429 """
1433 1430 o, d = self._chunkcache
1434 1431 l = len(d)
1435 1432
1436 1433 # is it in the cache?
1437 1434 cachestart = offset - o
1438 1435 cacheend = cachestart + length
1439 1436 if cachestart >= 0 and cacheend <= l:
1440 1437 if cachestart == 0 and cacheend == l:
1441 1438 return d # avoid a copy
1442 1439 return util.buffer(d, cachestart, cacheend - cachestart)
1443 1440
1444 1441 return self._readsegment(offset, length, df=df)
1445 1442
1446 1443 def _getsegmentforrevs(self, startrev, endrev, df=None):
1447 1444 """Obtain a segment of raw data corresponding to a range of revisions.
1448 1445
1449 1446 Accepts the start and end revisions and an optional already-open
1450 1447 file handle to be used for reading. If the file handle is read, its
1451 1448 seek position will not be preserved.
1452 1449
1453 1450 Requests for data may be satisfied by a cache.
1454 1451
1455 1452 Returns a 2-tuple of (offset, data) for the requested range of
1456 1453 revisions. Offset is the integer offset from the beginning of the
1457 1454 revlog and data is a str or buffer of the raw byte data.
1458 1455
1459 1456 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1460 1457 to determine where each revision's data begins and ends.
1461 1458 """
1462 1459 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1463 1460 # (functions are expensive).
1464 1461 index = self.index
1465 1462 istart = index[startrev]
1466 1463 start = int(istart[0] >> 16)
1467 1464 if startrev == endrev:
1468 1465 end = start + istart[1]
1469 1466 else:
1470 1467 iend = index[endrev]
1471 1468 end = int(iend[0] >> 16) + iend[1]
1472 1469
1473 1470 if self._inline:
1474 1471 start += (startrev + 1) * self._io.size
1475 1472 end += (endrev + 1) * self._io.size
1476 1473 length = end - start
1477 1474
1478 1475 return start, self._getsegment(start, length, df=df)
1479 1476
1480 1477 def _chunk(self, rev, df=None):
1481 1478 """Obtain a single decompressed chunk for a revision.
1482 1479
1483 1480 Accepts an integer revision and an optional already-open file handle
1484 1481 to be used for reading. If used, the seek position of the file will not
1485 1482 be preserved.
1486 1483
1487 1484 Returns a str holding uncompressed data for the requested revision.
1488 1485 """
1489 1486 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1490 1487
1491 1488 def _chunks(self, revs, df=None, targetsize=None):
1492 1489 """Obtain decompressed chunks for the specified revisions.
1493 1490
1494 1491 Accepts an iterable of numeric revisions that are assumed to be in
1495 1492 ascending order. Also accepts an optional already-open file handle
1496 1493 to be used for reading. If used, the seek position of the file will
1497 1494 not be preserved.
1498 1495
1499 1496 This function is similar to calling ``self._chunk()`` multiple times,
1500 1497 but is faster.
1501 1498
1502 1499 Returns a list with decompressed data for each requested revision.
1503 1500 """
1504 1501 if not revs:
1505 1502 return []
1506 1503 start = self.start
1507 1504 length = self.length
1508 1505 inline = self._inline
1509 1506 iosize = self._io.size
1510 1507 buffer = util.buffer
1511 1508
1512 1509 l = []
1513 1510 ladd = l.append
1514 1511
1515 1512 if not self._withsparseread:
1516 1513 slicedchunks = (revs,)
1517 1514 else:
1518 1515 slicedchunks = deltautil.slicechunk(self, revs,
1519 1516 targetsize=targetsize)
1520 1517
1521 1518 for revschunk in slicedchunks:
1522 1519 firstrev = revschunk[0]
1523 1520 # Skip trailing revisions with empty diff
1524 1521 for lastrev in revschunk[::-1]:
1525 1522 if length(lastrev) != 0:
1526 1523 break
1527 1524
1528 1525 try:
1529 1526 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1530 1527 except OverflowError:
1531 1528 # issue4215 - we can't cache a run of chunks greater than
1532 1529 # 2G on Windows
1533 1530 return [self._chunk(rev, df=df) for rev in revschunk]
1534 1531
1535 1532 decomp = self.decompress
1536 1533 for rev in revschunk:
1537 1534 chunkstart = start(rev)
1538 1535 if inline:
1539 1536 chunkstart += (rev + 1) * iosize
1540 1537 chunklength = length(rev)
1541 1538 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1542 1539
1543 1540 return l
1544 1541
1545 1542 def _chunkclear(self):
1546 1543 """Clear the raw chunk cache."""
1547 1544 self._chunkcache = (0, '')
1548 1545
1549 1546 def deltaparent(self, rev):
1550 1547 """return deltaparent of the given revision"""
1551 1548 base = self.index[rev][3]
1552 1549 if base == rev:
1553 1550 return nullrev
1554 1551 elif self._generaldelta:
1555 1552 return base
1556 1553 else:
1557 1554 return rev - 1
1558 1555
1559 1556 def issnapshot(self, rev):
1560 1557 """tells whether rev is a snapshot
1561 1558 """
1562 1559 if rev == nullrev:
1563 1560 return True
1564 1561 deltap = self.deltaparent(rev)
1565 1562 if deltap == nullrev:
1566 1563 return True
1567 1564 p1, p2 = self.parentrevs(rev)
1568 1565 if deltap in (p1, p2):
1569 1566 return False
1570 1567 return self.issnapshot(deltap)
1571 1568
1572 1569 def snapshotdepth(self, rev):
1573 1570 """number of snapshot in the chain before this one"""
1574 1571 if not self.issnapshot(rev):
1575 raise ProgrammingError('revision %d not a snapshot')
1572 raise error.ProgrammingError('revision %d not a snapshot' % rev)
1576 1573 return len(self._deltachain(rev)[0]) - 1
1577 1574
1578 1575 def revdiff(self, rev1, rev2):
1579 1576 """return or calculate a delta between two revisions
1580 1577
1581 1578 The delta calculated is in binary form and is intended to be written to
1582 1579 revlog data directly. So this function needs raw revision data.
1583 1580 """
1584 1581 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1585 1582 return bytes(self._chunk(rev2))
1586 1583
1587 1584 return mdiff.textdiff(self.revision(rev1, raw=True),
1588 1585 self.revision(rev2, raw=True))
1589 1586
1590 1587 def revision(self, nodeorrev, _df=None, raw=False):
1591 1588 """return an uncompressed revision of a given node or revision
1592 1589 number.
1593 1590
1594 1591 _df - an existing file handle to read from. (internal-only)
1595 1592 raw - an optional argument specifying if the revision data is to be
1596 1593 treated as raw data when applying flag transforms. 'raw' should be set
1597 1594 to True when generating changegroups or in debug commands.
1598 1595 """
1599 1596 if isinstance(nodeorrev, int):
1600 1597 rev = nodeorrev
1601 1598 node = self.node(rev)
1602 1599 else:
1603 1600 node = nodeorrev
1604 1601 rev = None
1605 1602
1606 1603 cachedrev = None
1607 1604 flags = None
1608 1605 rawtext = None
1609 1606 if node == nullid:
1610 1607 return ""
1611 1608 if self._cache:
1612 1609 if self._cache[0] == node:
1613 1610 # _cache only stores rawtext
1614 1611 if raw:
1615 1612 return self._cache[2]
1616 1613 # duplicated, but good for perf
1617 1614 if rev is None:
1618 1615 rev = self.rev(node)
1619 1616 if flags is None:
1620 1617 flags = self.flags(rev)
1621 1618 # no extra flags set, no flag processor runs, text = rawtext
1622 1619 if flags == REVIDX_DEFAULT_FLAGS:
1623 1620 return self._cache[2]
1624 1621 # rawtext is reusable. need to run flag processor
1625 1622 rawtext = self._cache[2]
1626 1623
1627 1624 cachedrev = self._cache[1]
1628 1625
1629 1626 # look up what we need to read
1630 1627 if rawtext is None:
1631 1628 if rev is None:
1632 1629 rev = self.rev(node)
1633 1630
1634 1631 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1635 1632 if stopped:
1636 1633 rawtext = self._cache[2]
1637 1634
1638 1635 # drop cache to save memory
1639 1636 self._cache = None
1640 1637
1641 1638 targetsize = None
1642 1639 rawsize = self.index[rev][2]
1643 1640 if 0 <= rawsize:
1644 1641 targetsize = 4 * rawsize
1645 1642
1646 1643 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1647 1644 if rawtext is None:
1648 1645 rawtext = bytes(bins[0])
1649 1646 bins = bins[1:]
1650 1647
1651 1648 rawtext = mdiff.patches(rawtext, bins)
1652 1649 self._cache = (node, rev, rawtext)
1653 1650
1654 1651 if flags is None:
1655 1652 if rev is None:
1656 1653 rev = self.rev(node)
1657 1654 flags = self.flags(rev)
1658 1655
1659 1656 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
1660 1657 if validatehash:
1661 1658 self.checkhash(text, node, rev=rev)
1662 1659
1663 1660 return text
1664 1661
1665 1662 def hash(self, text, p1, p2):
1666 1663 """Compute a node hash.
1667 1664
1668 1665 Available as a function so that subclasses can replace the hash
1669 1666 as needed.
1670 1667 """
1671 1668 return hash(text, p1, p2)
1672 1669
1673 1670 def _processflags(self, text, flags, operation, raw=False):
1674 1671 """Inspect revision data flags and applies transforms defined by
1675 1672 registered flag processors.
1676 1673
1677 1674 ``text`` - the revision data to process
1678 1675 ``flags`` - the revision flags
1679 1676 ``operation`` - the operation being performed (read or write)
1680 1677 ``raw`` - an optional argument describing if the raw transform should be
1681 1678 applied.
1682 1679
1683 1680 This method processes the flags in the order (or reverse order if
1684 1681 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1685 1682         flag processors registered for present flags. Because flag transforms
1686 1683         may not commute, the order defined in REVIDX_FLAGS_ORDER must be stable.
1687 1684
1688 1685 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1689 1686 processed text and ``validatehash`` is a bool indicating whether the
1690 1687 returned text should be checked for hash integrity.
1691 1688
1692 1689 Note: If the ``raw`` argument is set, it has precedence over the
1693 1690 operation and will only update the value of ``validatehash``.
1694 1691 """
1695 1692 # fast path: no flag processors will run
1696 1693 if flags == 0:
1697 1694 return text, True
1698 1695 if not operation in ('read', 'write'):
1699 raise ProgrammingError(_("invalid '%s' operation ") % (operation))
1696 raise error.ProgrammingError(_("invalid '%s' operation") %
1697 operation)
1700 1698 # Check all flags are known.
1701 1699 if flags & ~REVIDX_KNOWN_FLAGS:
1702 1700 raise error.RevlogError(_("incompatible revision flag '%#x'") %
1703 1701 (flags & ~REVIDX_KNOWN_FLAGS))
1704 1702 validatehash = True
1705 1703 # Depending on the operation (read or write), the order might be
1706 1704 # reversed due to non-commutative transforms.
1707 1705 orderedflags = REVIDX_FLAGS_ORDER
1708 1706 if operation == 'write':
1709 1707 orderedflags = reversed(orderedflags)
1710 1708
1711 1709 for flag in orderedflags:
1712 1710 # If a flagprocessor has been registered for a known flag, apply the
1713 1711 # related operation transform and update result tuple.
1714 1712 if flag & flags:
1715 1713 vhash = True
1716 1714
1717 1715 if flag not in self._flagprocessors:
1718 1716 message = _("missing processor for flag '%#x'") % (flag)
1719 1717 raise error.RevlogError(message)
1720 1718
1721 1719 processor = self._flagprocessors[flag]
1722 1720 if processor is not None:
1723 1721 readtransform, writetransform, rawtransform = processor
1724 1722
1725 1723 if raw:
1726 1724 vhash = rawtransform(self, text)
1727 1725 elif operation == 'read':
1728 1726 text, vhash = readtransform(self, text)
1729 1727 else: # write operation
1730 1728 text, vhash = writetransform(self, text)
1731 1729 validatehash = validatehash and vhash
1732 1730
1733 1731 return text, validatehash
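# A hedged sketch of the processor protocol consumed above, for a
# hypothetical REVIDX_EXAMPLE flag: a processor is a (read, write, raw)
# triple where the read/write transforms return (newtext, validatehash)
# and the raw transform only reports whether the rawtext hash stays
# verifiable. All names below are illustrative.

def _examplereadtransform(rl, text):
    return text[1:], True            # drop a marker byte on read

def _examplewritetransform(rl, text):
    return b'\x01' + text, True      # prepend the marker byte on write

def _examplerawtransform(rl, text):
    return False                     # rawtext differs from cooked text

exampleflagprocessor = (_examplereadtransform,
                        _examplewritetransform,
                        _examplerawtransform)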
1734 1732
1735 1733 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1736 1734 """Check node hash integrity.
1737 1735
1738 1736 Available as a function so that subclasses can extend hash mismatch
1739 1737 behaviors as needed.
1740 1738 """
1741 1739 try:
1742 1740 if p1 is None and p2 is None:
1743 1741 p1, p2 = self.parents(node)
1744 1742 if node != self.hash(text, p1, p2):
1745 1743 revornode = rev
1746 1744 if revornode is None:
1747 1745 revornode = templatefilters.short(hex(node))
1748 1746 raise error.RevlogError(_("integrity check failed on %s:%s")
1749 1747 % (self.indexfile, pycompat.bytestr(revornode)))
1750 1748 except error.RevlogError:
1751 1749 if self._censorable and _censoredtext(text):
1752 1750 raise error.CensoredNodeError(self.indexfile, node, text)
1753 1751 raise
1754 1752
1755 1753 def _enforceinlinesize(self, tr, fp=None):
1756 1754 """Check if the revlog is too big for inline and convert if so.
1757 1755
1758 1756 This should be called after revisions are added to the revlog. If the
1759 1757 revlog has grown too large to be an inline revlog, it will convert it
1760 1758 to use multiple index and data files.
1761 1759 """
1762 1760 tiprev = len(self) - 1
1763 1761 if (not self._inline or
1764 1762 (self.start(tiprev) + self.length(tiprev)) < _maxinline):
1765 1763 return
1766 1764
1767 1765 trinfo = tr.find(self.indexfile)
1768 1766 if trinfo is None:
1769 1767 raise error.RevlogError(_("%s not found in the transaction")
1770 1768 % self.indexfile)
1771 1769
1772 1770 trindex = trinfo[2]
1773 1771 if trindex is not None:
1774 1772 dataoff = self.start(trindex)
1775 1773 else:
1776 1774 # revlog was stripped at start of transaction, use all leftover data
1777 1775 trindex = len(self) - 1
1778 1776 dataoff = self.end(tiprev)
1779 1777
1780 1778 tr.add(self.datafile, dataoff)
1781 1779
1782 1780 if fp:
1783 1781 fp.flush()
1784 1782 fp.close()
1785 1783
1786 1784 with self._datafp('w') as df:
1787 1785 for r in self:
1788 1786 df.write(self._getsegmentforrevs(r, r)[1])
1789 1787
1790 1788 with self._indexfp('w') as fp:
1791 1789 self.version &= ~FLAG_INLINE_DATA
1792 1790 self._inline = False
1793 1791 io = self._io
1794 1792 for i in self:
1795 1793 e = io.packentry(self.index[i], self.node, self.version, i)
1796 1794 fp.write(e)
1797 1795
1798 1796         # the temp file replaces the real index when we exit the context
1799 1797 # manager
1800 1798
1801 1799 tr.replace(self.indexfile, trindex * self._io.size)
1802 1800 self._chunkclear()
1803 1801
1804 1802 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1805 1803 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
1806 1804 """add a revision to the log
1807 1805
1808 1806 text - the revision data to add
1809 1807 transaction - the transaction object used for rollback
1810 1808 link - the linkrev data to add
1811 1809 p1, p2 - the parent nodeids of the revision
1812 1810 cachedelta - an optional precomputed delta
1813 1811 node - nodeid of revision; typically node is not specified, and it is
1814 1812 computed by default as hash(text, p1, p2), however subclasses might
1815 1813 use different hashing method (and override checkhash() in such case)
1816 1814 flags - the known flags to set on the revision
1817 1815 deltacomputer - an optional deltacomputer instance shared between
1818 1816 multiple calls
1819 1817 """
1820 1818 if link == nullrev:
1821 1819 raise error.RevlogError(_("attempted to add linkrev -1 to %s")
1822 1820 % self.indexfile)
1823 1821
1824 1822 if flags:
1825 1823 node = node or self.hash(text, p1, p2)
1826 1824
1827 1825 rawtext, validatehash = self._processflags(text, flags, 'write')
1828 1826
1829 1827 # If the flag processor modifies the revision data, ignore any provided
1830 1828 # cachedelta.
1831 1829 if rawtext != text:
1832 1830 cachedelta = None
1833 1831
1834 1832 if len(rawtext) > _maxentrysize:
1835 1833 raise error.RevlogError(
1836 1834 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1837 1835 % (self.indexfile, len(rawtext)))
1838 1836
1839 1837 node = node or self.hash(rawtext, p1, p2)
1840 1838 if node in self.nodemap:
1841 1839 return node
1842 1840
1843 1841 if validatehash:
1844 1842 self.checkhash(rawtext, node, p1=p1, p2=p2)
1845 1843
1846 1844 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1847 1845 flags, cachedelta=cachedelta,
1848 1846 deltacomputer=deltacomputer)
1849 1847
1850 1848 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1851 1849 cachedelta=None, deltacomputer=None):
1852 1850 """add a raw revision with known flags, node and parents
1853 1851 useful when reusing a revision not stored in this revlog (ex: received
1854 1852 over wire, or read from an external bundle).
1855 1853         over the wire, or read from an external bundle).
1856 1854 dfh = None
1857 1855 if not self._inline:
1858 1856 dfh = self._datafp("a+")
1859 1857 ifh = self._indexfp("a+")
1860 1858 try:
1861 1859 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1862 1860 flags, cachedelta, ifh, dfh,
1863 1861 deltacomputer=deltacomputer)
1864 1862 finally:
1865 1863 if dfh:
1866 1864 dfh.close()
1867 1865 ifh.close()
1868 1866
1869 1867 def compress(self, data):
1870 1868 """Generate a possibly-compressed representation of data."""
1871 1869 if not data:
1872 1870 return '', data
1873 1871
1874 1872 compressed = self._compressor.compress(data)
1875 1873
1876 1874 if compressed:
1877 1875 # The revlog compressor added the header in the returned data.
1878 1876 return '', compressed
1879 1877
1880 1878 if data[0:1] == '\0':
1881 1879 return '', data
1882 1880 return 'u', data
1883 1881
1884 1882 def decompress(self, data):
1885 1883 """Decompress a revlog chunk.
1886 1884
1887 1885 The chunk is expected to begin with a header identifying the
1888 1886 format type so it can be routed to an appropriate decompressor.
1889 1887 """
1890 1888 if not data:
1891 1889 return data
1892 1890
1893 1891 # Revlogs are read much more frequently than they are written and many
1894 1892 # chunks only take microseconds to decompress, so performance is
1895 1893 # important here.
1896 1894 #
1897 1895 # We can make a few assumptions about revlogs:
1898 1896 #
1899 1897 # 1) the majority of chunks will be compressed (as opposed to inline
1900 1898 # raw data).
1901 1899         # 2) decompressing *any* data will likely be at least 10x slower than
1902 1900 # returning raw inline data.
1903 1901 # 3) we want to prioritize common and officially supported compression
1904 1902 # engines
1905 1903 #
1906 1904 # It follows that we want to optimize for "decompress compressed data
1907 1905 # when encoded with common and officially supported compression engines"
1908 1906 # case over "raw data" and "data encoded by less common or non-official
1909 1907 # compression engines." That is why we have the inline lookup first
1910 1908 # followed by the compengines lookup.
1911 1909 #
1912 1910 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1913 1911 # compressed chunks. And this matters for changelog and manifest reads.
1914 1912 t = data[0:1]
1915 1913
1916 1914 if t == 'x':
1917 1915 try:
1918 1916 return _zlibdecompress(data)
1919 1917 except zlib.error as e:
1920 1918 raise error.RevlogError(_('revlog decompress error: %s') %
1921 1919 stringutil.forcebytestr(e))
1922 1920 # '\0' is more common than 'u' so it goes first.
1923 1921 elif t == '\0':
1924 1922 return data
1925 1923 elif t == 'u':
1926 1924 return util.buffer(data, 1)
1927 1925
1928 1926 try:
1929 1927 compressor = self._decompressors[t]
1930 1928 except KeyError:
1931 1929 try:
1932 1930 engine = util.compengines.forrevlogheader(t)
1933 1931 compressor = engine.revlogcompressor()
1934 1932 self._decompressors[t] = compressor
1935 1933 except KeyError:
1936 1934 raise error.RevlogError(_('unknown compression type %r') % t)
1937 1935
1938 1936 return compressor.decompress(data)
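# A condensed, illustrative model of the header dispatch above, covering
# only the built-in paths (pluggable engines omitted): 'x' is the first
# byte of a zlib stream, '\0' marks chunks stored verbatim, and 'u'
# prefixes literal data that did not already start with '\0'.

import zlib

def _toydecompress(data):
    t = data[0:1]
    if t == b'x':
        return zlib.decompress(data)
    if t == b'\x00':
        return data
    if t == b'u':
        return data[1:]
    raise ValueError('unknown compression type %r' % t)

assert _toydecompress(zlib.compress(b'some text')) == b'some text'
assert _toydecompress(b'usome text') == b'some text'
assert _toydecompress(b'\x00binary') == b'\x00binary'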
1939 1937
1940 1938 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1941 1939 cachedelta, ifh, dfh, alwayscache=False,
1942 1940 deltacomputer=None):
1943 1941 """internal function to add revisions to the log
1944 1942
1945 1943 see addrevision for argument descriptions.
1946 1944
1947 1945 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1948 1946
1949 1947 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
1950 1948 be used.
1951 1949
1952 1950 invariants:
1953 1951 - rawtext is optional (can be None); if not set, cachedelta must be set.
1954 1952 if both are set, they must correspond to each other.
1955 1953 """
1956 1954 if node == nullid:
1957 1955 raise error.RevlogError(_("%s: attempt to add null revision") %
1958 1956 self.indexfile)
1959 1957 if node == wdirid or node in wdirfilenodeids:
1960 1958 raise error.RevlogError(_("%s: attempt to add wdir revision") %
1961 1959 self.indexfile)
1962 1960
1963 1961 if self._inline:
1964 1962 fh = ifh
1965 1963 else:
1966 1964 fh = dfh
1967 1965
1968 1966 btext = [rawtext]
1969 1967
1970 1968 curr = len(self)
1971 1969 prev = curr - 1
1972 1970 offset = self.end(prev)
1973 1971 p1r, p2r = self.rev(p1), self.rev(p2)
1974 1972
1975 1973 # full versions are inserted when the needed deltas
1976 1974 # become comparable to the uncompressed text
1977 1975 if rawtext is None:
1978 1976 # need rawtext size, before changed by flag processors, which is
1979 1977 # the non-raw size. use revlog explicitly to avoid filelog's extra
1980 1978 # logic that might remove metadata size.
1981 1979 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
1982 1980 cachedelta[1])
1983 1981 else:
1984 1982 textlen = len(rawtext)
1985 1983
1986 1984 if deltacomputer is None:
1987 1985 deltacomputer = deltautil.deltacomputer(self)
1988 1986
1989 1987 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
1990 1988
1991 1989 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
1992 1990
1993 1991 e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
1994 1992 deltainfo.base, link, p1r, p2r, node)
1995 1993 self.index.append(e)
1996 1994 self.nodemap[node] = curr
1997 1995
1998 1996 entry = self._io.packentry(e, self.node, self.version, curr)
1999 1997 self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
2000 1998 link, offset)
2001 1999
2002 2000 rawtext = btext[0]
2003 2001
2004 2002 if alwayscache and rawtext is None:
2005 2003 rawtext = deltacomputer.buildtext(revinfo, fh)
2006 2004
2007 2005 if type(rawtext) == bytes: # only accept immutable objects
2008 2006 self._cache = (node, curr, rawtext)
2009 2007 self._chainbasecache[curr] = deltainfo.chainbase
2010 2008 return node
2011 2009
2012 2010 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2013 2011 # Files opened in a+ mode have inconsistent behavior on various
2014 2012 # platforms. Windows requires that a file positioning call be made
2015 2013 # when the file handle transitions between reads and writes. See
2016 2014 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2017 2015 # platforms, Python or the platform itself can be buggy. Some versions
2018 2016 # of Solaris have been observed to not append at the end of the file
2019 2017         # if the file was seeked to a point before the end. See issue4943 for more.
2020 2018 #
2021 2019 # We work around this issue by inserting a seek() before writing.
2022 2020 # Note: This is likely not necessary on Python 3.
2023 2021 ifh.seek(0, os.SEEK_END)
2024 2022 if dfh:
2025 2023 dfh.seek(0, os.SEEK_END)
2026 2024
2027 2025 curr = len(self) - 1
2028 2026 if not self._inline:
2029 2027 transaction.add(self.datafile, offset)
2030 2028 transaction.add(self.indexfile, curr * len(entry))
2031 2029 if data[0]:
2032 2030 dfh.write(data[0])
2033 2031 dfh.write(data[1])
2034 2032 ifh.write(entry)
2035 2033 else:
2036 2034 offset += curr * self._io.size
2037 2035 transaction.add(self.indexfile, offset, curr)
2038 2036 ifh.write(entry)
2039 2037 ifh.write(data[0])
2040 2038 ifh.write(data[1])
2041 2039 self._enforceinlinesize(transaction, ifh)
2042 2040
2043 2041 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2044 2042 """
2045 2043 add a delta group
2046 2044
2047 2045 given a set of deltas, add them to the revision log. the
2048 2046 first delta is against its parent, which should be in our
2049 2047 log, the rest are against the previous delta.
2050 2048
2051 2049 If ``addrevisioncb`` is defined, it will be called with arguments of
2052 2050 this revlog and the node that was added.
2053 2051 """
2054 2052
2055 2053 nodes = []
2056 2054
2057 2055 r = len(self)
2058 2056 end = 0
2059 2057 if r:
2060 2058 end = self.end(r - 1)
2061 2059 ifh = self._indexfp("a+")
2062 2060 isize = r * self._io.size
2063 2061 if self._inline:
2064 2062 transaction.add(self.indexfile, end + isize, r)
2065 2063 dfh = None
2066 2064 else:
2067 2065 transaction.add(self.indexfile, isize, r)
2068 2066 transaction.add(self.datafile, end)
2069 2067 dfh = self._datafp("a+")
2070 2068 def flush():
2071 2069 if dfh:
2072 2070 dfh.flush()
2073 2071 ifh.flush()
2074 2072 try:
2075 2073 deltacomputer = deltautil.deltacomputer(self)
2076 2074 # loop through our set of deltas
2077 2075 for data in deltas:
2078 2076 node, p1, p2, linknode, deltabase, delta, flags = data
2079 2077 link = linkmapper(linknode)
2080 2078 flags = flags or REVIDX_DEFAULT_FLAGS
2081 2079
2082 2080 nodes.append(node)
2083 2081
2084 2082 if node in self.nodemap:
2085 2083 # this can happen if two branches make the same change
2086 2084 continue
2087 2085
2088 2086 for p in (p1, p2):
2089 2087 if p not in self.nodemap:
2090 2088 raise LookupError(p, self.indexfile,
2091 2089 _('unknown parent'))
2092 2090
2093 2091 if deltabase not in self.nodemap:
2094 2092 raise LookupError(deltabase, self.indexfile,
2095 2093 _('unknown delta base'))
2096 2094
2097 2095 baserev = self.rev(deltabase)
2098 2096
2099 2097 if baserev != nullrev and self.iscensored(baserev):
2100 2098 # if base is censored, delta must be full replacement in a
2101 2099 # single patch operation
2102 2100 hlen = struct.calcsize(">lll")
2103 2101 oldlen = self.rawsize(baserev)
2104 2102 newlen = len(delta) - hlen
2105 2103 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2106 2104 raise error.CensoredBaseError(self.indexfile,
2107 2105 self.node(baserev))
2108 2106
2109 2107 if not flags and self._peek_iscensored(baserev, delta, flush):
2110 2108 flags |= REVIDX_ISCENSORED
2111 2109
2112 2110 # We assume consumers of addrevisioncb will want to retrieve
2113 2111 # the added revision, which will require a call to
2114 2112 # revision(). revision() will fast path if there is a cache
2115 2113 # hit. So, we tell _addrevision() to always cache in this case.
2116 2114 # We're only using addgroup() in the context of changegroup
2117 2115 # generation so the revision data can always be handled as raw
2118 2116 # by the flagprocessor.
2119 2117 self._addrevision(node, None, transaction, link,
2120 2118 p1, p2, flags, (baserev, delta),
2121 2119 ifh, dfh,
2122 2120 alwayscache=bool(addrevisioncb),
2123 2121 deltacomputer=deltacomputer)
2124 2122
2125 2123 if addrevisioncb:
2126 2124 addrevisioncb(self, node)
2127 2125
2128 2126 if not dfh and not self._inline:
2129 2127 # addrevision switched from inline to conventional
2130 2128 # reopen the index
2131 2129 ifh.close()
2132 2130 dfh = self._datafp("a+")
2133 2131 ifh = self._indexfp("a+")
2134 2132 finally:
2135 2133 if dfh:
2136 2134 dfh.close()
2137 2135 ifh.close()
2138 2136
2139 2137 return nodes
2140 2138
2141 2139 def iscensored(self, rev):
2142 2140 """Check if a file revision is censored."""
2143 2141 if not self._censorable:
2144 2142 return False
2145 2143
2146 2144 return self.flags(rev) & REVIDX_ISCENSORED
2147 2145
2148 2146 def _peek_iscensored(self, baserev, delta, flush):
2149 2147 """Quickly check if a delta produces a censored revision."""
2150 2148 if not self._censorable:
2151 2149 return False
2152 2150
2153 2151 # Fragile heuristic: unless new file meta keys are added alphabetically
2154 2152 # preceding "censored", all censored revisions are prefixed by
2155 2153 # "\1\ncensored:". A delta producing such a censored revision must be a
2156 2154 # full-replacement delta, so we inspect the first and only patch in the
2157 2155 # delta for this prefix.
2158 2156 hlen = struct.calcsize(">lll")
2159 2157 if len(delta) <= hlen:
2160 2158 return False
2161 2159
2162 2160 oldlen = self.rawsize(baserev)
2163 2161 newlen = len(delta) - hlen
2164 2162 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2165 2163 return False
2166 2164
2167 2165 add = "\1\ncensored:"
2168 2166 addlen = len(add)
2169 2167 return newlen >= addlen and delta[hlen:hlen + addlen] == add
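# Worked example of the prefix check above, with hypothetical lengths: a
# full-replacement delta is a single (start, end, newlength) patch header
# packed as ">lll" (covering the whole old text, so start=0 and
# end=oldlen) followed by the replacement text, which puts the censored
# marker immediately after the 12-byte header.

import struct

hlen = struct.calcsize(">lll")                        # 12 bytes
newtext = b"\x01\ncensored: example tombstone\n\x01\n"
oldlen = 40                                           # made-up rawsize
delta = struct.pack(">lll", 0, oldlen, len(newtext)) + newtext

assert delta[:hlen] == struct.pack(">lll", 0, oldlen, len(newtext))
assert delta[hlen:hlen + 11] == b"\x01\ncensored:"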
2170 2168
2171 2169 def getstrippoint(self, minlink):
2172 2170 """find the minimum rev that must be stripped to strip the linkrev
2173 2171
2174 2172 Returns a tuple containing the minimum rev and a set of all revs that
2175 2173 have linkrevs that will be broken by this strip.
2176 2174 """
2177 2175 brokenrevs = set()
2178 2176 strippoint = len(self)
2179 2177
2180 2178 heads = {}
2181 2179 futurelargelinkrevs = set()
2182 2180 for head in self.headrevs():
2183 2181 headlinkrev = self.linkrev(head)
2184 2182 heads[head] = headlinkrev
2185 2183 if headlinkrev >= minlink:
2186 2184 futurelargelinkrevs.add(headlinkrev)
2187 2185
2188 2186 # This algorithm involves walking down the rev graph, starting at the
2189 2187 # heads. Since the revs are topologically sorted according to linkrev,
2190 2188 # once all head linkrevs are below the minlink, we know there are
2191 2189 # no more revs that could have a linkrev greater than minlink.
2192 2190 # So we can stop walking.
2193 2191 while futurelargelinkrevs:
2194 2192 strippoint -= 1
2195 2193 linkrev = heads.pop(strippoint)
2196 2194
2197 2195 if linkrev < minlink:
2198 2196 brokenrevs.add(strippoint)
2199 2197 else:
2200 2198 futurelargelinkrevs.remove(linkrev)
2201 2199
2202 2200 for p in self.parentrevs(strippoint):
2203 2201 if p != nullrev:
2204 2202 plinkrev = self.linkrev(p)
2205 2203 heads[p] = plinkrev
2206 2204 if plinkrev >= minlink:
2207 2205 futurelargelinkrevs.add(plinkrev)
2208 2206
2209 2207 return strippoint, brokenrevs
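# A dict-based transcription of the walk above (all names hypothetical),
# followed by a tiny worked case: two branches off rev 0, where rev 1
# carries a large linkrev and rev 2 a small one, so stripping truncates
# at rev 1 and reports rev 2 as broken.

def _toystrippoint(linkrevs, parents, headrevs, minlink, nullrev=-1):
    brokenrevs = set()
    strippoint = len(linkrevs)
    heads = {}
    futurelargelinkrevs = set()
    for head in headrevs:
        headlinkrev = linkrevs[head]
        heads[head] = headlinkrev
        if headlinkrev >= minlink:
            futurelargelinkrevs.add(headlinkrev)
    while futurelargelinkrevs:
        strippoint -= 1
        linkrev = heads.pop(strippoint)
        if linkrev < minlink:
            brokenrevs.add(strippoint)
        else:
            futurelargelinkrevs.remove(linkrev)
        for p in parents[strippoint]:
            if p != nullrev:
                plinkrev = linkrevs[p]
                heads[p] = plinkrev
                if plinkrev >= minlink:
                    futurelargelinkrevs.add(plinkrev)
    return strippoint, brokenrevs

linkrevs = {0: 0, 1: 3, 2: 1}
parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1)}
assert _toystrippoint(linkrevs, parents, [1, 2], 2) == (1, set([2]))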
2210 2208
2211 2209 def strip(self, minlink, transaction):
2212 2210 """truncate the revlog on the first revision with a linkrev >= minlink
2213 2211
2214 2212 This function is called when we're stripping revision minlink and
2215 2213 its descendants from the repository.
2216 2214
2217 2215 We have to remove all revisions with linkrev >= minlink, because
2218 2216 the equivalent changelog revisions will be renumbered after the
2219 2217 strip.
2220 2218
2221 2219 So we truncate the revlog on the first of these revisions, and
2222 2220 trust that the caller has saved the revisions that shouldn't be
2223 2221 removed and that it'll re-add them after this truncation.
2224 2222 """
2225 2223 if len(self) == 0:
2226 2224 return
2227 2225
2228 2226 rev, _ = self.getstrippoint(minlink)
2229 2227 if rev == len(self):
2230 2228 return
2231 2229
2232 2230 # first truncate the files on disk
2233 2231 end = self.start(rev)
2234 2232 if not self._inline:
2235 2233 transaction.add(self.datafile, end)
2236 2234 end = rev * self._io.size
2237 2235 else:
2238 2236 end += rev * self._io.size
2239 2237
2240 2238 transaction.add(self.indexfile, end)
2241 2239
2242 2240 # then reset internal state in memory to forget those revisions
2243 2241 self._cache = None
2244 2242 self._chaininfocache = {}
2245 2243 self._chunkclear()
2246 2244 for x in pycompat.xrange(rev, len(self)):
2247 2245 del self.nodemap[self.node(x)]
2248 2246
2249 2247 del self.index[rev:-1]
2250 2248 self._nodepos = None
2251 2249
2252 2250 def checksize(self):
2253 2251 expected = 0
2254 2252 if len(self):
2255 2253 expected = max(0, self.end(len(self) - 1))
2256 2254
2257 2255 try:
2258 2256 with self._datafp() as f:
2259 2257 f.seek(0, 2)
2260 2258 actual = f.tell()
2261 2259 dd = actual - expected
2262 2260 except IOError as inst:
2263 2261 if inst.errno != errno.ENOENT:
2264 2262 raise
2265 2263 dd = 0
2266 2264
2267 2265 try:
2268 2266 f = self.opener(self.indexfile)
2269 2267 f.seek(0, 2)
2270 2268 actual = f.tell()
2271 2269 f.close()
2272 2270 s = self._io.size
2273 2271 i = max(0, actual // s)
2274 2272 di = actual - (i * s)
2275 2273 if self._inline:
2276 2274 databytes = 0
2277 2275 for r in self:
2278 2276 databytes += max(0, self.length(r))
2279 2277 dd = 0
2280 2278 di = actual - len(self) * s - databytes
2281 2279 except IOError as inst:
2282 2280 if inst.errno != errno.ENOENT:
2283 2281 raise
2284 2282 di = 0
2285 2283
2286 2284 return (dd, di)
2287 2285
2288 2286 def files(self):
2289 2287 res = [self.indexfile]
2290 2288 if not self._inline:
2291 2289 res.append(self.datafile)
2292 2290 return res
2293 2291
2294 2292 def emitrevisiondeltas(self, requests):
2295 2293 frev = self.rev
2296 2294
2297 2295 prevrev = None
2298 2296 for request in requests:
2299 2297 node = request.node
2300 2298 rev = frev(node)
2301 2299
2302 2300 if prevrev is None:
2303 2301 prevrev = self.index[rev][5]
2304 2302
2305 2303 # Requesting a full revision.
2306 2304 if request.basenode == nullid:
2307 2305 baserev = nullrev
2308 2306 # Requesting an explicit revision.
2309 2307 elif request.basenode is not None:
2310 2308 baserev = frev(request.basenode)
2311 2309 # Allowing us to choose.
2312 2310 else:
2313 2311 p1rev, p2rev = self.parentrevs(rev)
2314 2312 deltaparentrev = self.deltaparent(rev)
2315 2313
2316 2314 # Avoid sending full revisions when delta parent is null. Pick
2317 2315 # prev in that case. It's tempting to pick p1 in this case, as
2318 2316 # p1 will be smaller in the common case. However, computing a
2319 2317 # delta against p1 may require resolving the raw text of p1,
2320 2318 # which could be expensive. The revlog caches should have prev
2321 2319 # cached, meaning less CPU for delta generation. There is
2322 2320 # likely room to add a flag and/or config option to control this
2323 2321 # behavior.
2324 2322 if deltaparentrev == nullrev and self._storedeltachains:
2325 2323 baserev = prevrev
2326 2324
2327 2325 # Revlog is configured to use full snapshot for a reason.
2328 2326 # Stick to full snapshot.
2329 2327 elif deltaparentrev == nullrev:
2330 2328 baserev = nullrev
2331 2329
2332 2330 # Pick previous when we can't be sure the base is available
2333 2331 # on consumer.
2334 2332 elif deltaparentrev not in (p1rev, p2rev, prevrev):
2335 2333 baserev = prevrev
2336 2334 else:
2337 2335 baserev = deltaparentrev
2338 2336
2339 2337 if baserev != nullrev and not self.candelta(baserev, rev):
2340 2338 baserev = nullrev
2341 2339
2342 2340 revision = None
2343 2341 delta = None
2344 2342 baserevisionsize = None
2345 2343
2346 2344 if self.iscensored(baserev) or self.iscensored(rev):
2347 2345 try:
2348 2346 revision = self.revision(node, raw=True)
2349 2347 except error.CensoredNodeError as e:
2350 2348 revision = e.tombstone
2351 2349
2352 2350 if baserev != nullrev:
2353 2351 baserevisionsize = self.rawsize(baserev)
2354 2352
2355 2353 elif baserev == nullrev:
2356 2354 revision = self.revision(node, raw=True)
2357 2355 else:
2358 2356 delta = self.revdiff(baserev, rev)
2359 2357
2360 2358 extraflags = REVIDX_ELLIPSIS if request.ellipsis else 0
2361 2359
2362 2360 yield revlogrevisiondelta(
2363 2361 node=node,
2364 2362 p1node=request.p1node,
2365 2363 p2node=request.p2node,
2366 2364 linknode=request.linknode,
2367 2365 basenode=self.node(baserev),
2368 2366 flags=self.flags(rev) | extraflags,
2369 2367 baserevisionsize=baserevisionsize,
2370 2368 revision=revision,
2371 2369 delta=delta)
2372 2370
2373 2371 prevrev = rev
2374 2372
2375 2373 DELTAREUSEALWAYS = 'always'
2376 2374 DELTAREUSESAMEREVS = 'samerevs'
2377 2375 DELTAREUSENEVER = 'never'
2378 2376
2379 2377 DELTAREUSEFULLADD = 'fulladd'
2380 2378
2381 2379 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2382 2380
2383 2381 def clone(self, tr, destrevlog, addrevisioncb=None,
2384 2382 deltareuse=DELTAREUSESAMEREVS, deltabothparents=None):
2385 2383 """Copy this revlog to another, possibly with format changes.
2386 2384
2387 2385 The destination revlog will contain the same revisions and nodes.
2388 2386 However, it may not be bit-for-bit identical due to e.g. delta encoding
2389 2387 differences.
2390 2388
2391 2389         The ``deltareuse`` argument controls how deltas from the existing revlog
2392 2390 are preserved in the destination revlog. The argument can have the
2393 2391 following values:
2394 2392
2395 2393 DELTAREUSEALWAYS
2396 2394 Deltas will always be reused (if possible), even if the destination
2397 2395 revlog would not select the same revisions for the delta. This is the
2398 2396 fastest mode of operation.
2399 2397 DELTAREUSESAMEREVS
2400 2398 Deltas will be reused if the destination revlog would pick the same
2401 2399 revisions for the delta. This mode strikes a balance between speed
2402 2400 and optimization.
2403 2401 DELTAREUSENEVER
2404 2402 Deltas will never be reused. This is the slowest mode of execution.
2405 2403 This mode can be used to recompute deltas (e.g. if the diff/delta
2406 2404 algorithm changes).
2407 2405
2408 2406 Delta computation can be slow, so the choice of delta reuse policy can
2409 2407 significantly affect run time.
2410 2408
2411 2409 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2412 2410 two extremes. Deltas will be reused if they are appropriate. But if the
2413 2411 delta could choose a better revision, it will do so. This means if you
2414 2412 are converting a non-generaldelta revlog to a generaldelta revlog,
2415 2413 deltas will be recomputed if the delta's parent isn't a parent of the
2416 2414 revision.
2417 2415
2418 2416 In addition to the delta policy, the ``deltabothparents`` argument
2419 2417 controls whether to compute deltas against both parents for merges.
2420 2418         If it is None, the destination revlog's current setting is used.
2421 2419 """
2422 2420 if deltareuse not in self.DELTAREUSEALL:
2423 2421 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2424 2422
2425 2423 if len(destrevlog):
2426 2424 raise ValueError(_('destination revlog is not empty'))
2427 2425
2428 2426 if getattr(self, 'filteredrevs', None):
2429 2427 raise ValueError(_('source revlog has filtered revisions'))
2430 2428 if getattr(destrevlog, 'filteredrevs', None):
2431 2429 raise ValueError(_('destination revlog has filtered revisions'))
2432 2430
2433 2431 # lazydeltabase controls whether to reuse a cached delta, if possible.
2434 2432 oldlazydeltabase = destrevlog._lazydeltabase
2435 2433 oldamd = destrevlog._deltabothparents
2436 2434
2437 2435 try:
2438 2436 if deltareuse == self.DELTAREUSEALWAYS:
2439 2437 destrevlog._lazydeltabase = True
2440 2438 elif deltareuse == self.DELTAREUSESAMEREVS:
2441 2439 destrevlog._lazydeltabase = False
2442 2440
2443 2441 destrevlog._deltabothparents = deltabothparents or oldamd
2444 2442
2445 2443 populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
2446 2444 self.DELTAREUSESAMEREVS)
2447 2445
2448 2446 deltacomputer = deltautil.deltacomputer(destrevlog)
2449 2447 index = self.index
2450 2448 for rev in self:
2451 2449 entry = index[rev]
2452 2450
2453 2451 # Some classes override linkrev to take filtered revs into
2454 2452 # account. Use raw entry from index.
2455 2453 flags = entry[0] & 0xffff
2456 2454 linkrev = entry[4]
2457 2455 p1 = index[entry[5]][7]
2458 2456 p2 = index[entry[6]][7]
2459 2457 node = entry[7]
2460 2458
2461 2459 # (Possibly) reuse the delta from the revlog if allowed and
2462 2460 # the revlog chunk is a delta.
2463 2461 cachedelta = None
2464 2462 rawtext = None
2465 2463 if populatecachedelta:
2466 2464 dp = self.deltaparent(rev)
2467 2465 if dp != nullrev:
2468 2466 cachedelta = (dp, bytes(self._chunk(rev)))
2469 2467
2470 2468 if not cachedelta:
2471 2469 rawtext = self.revision(rev, raw=True)
2472 2470
2473 2471
2474 2472 if deltareuse == self.DELTAREUSEFULLADD:
2475 2473 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2476 2474 cachedelta=cachedelta,
2477 2475 node=node, flags=flags,
2478 2476 deltacomputer=deltacomputer)
2479 2477 else:
2480 2478 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2481 2479 checkambig=False)
2482 2480 dfh = None
2483 2481 if not destrevlog._inline:
2484 2482 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2485 2483 try:
2486 2484 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2487 2485 p2, flags, cachedelta, ifh, dfh,
2488 2486 deltacomputer=deltacomputer)
2489 2487 finally:
2490 2488 if dfh:
2491 2489 dfh.close()
2492 2490 ifh.close()
2493 2491
2494 2492 if addrevisioncb:
2495 2493 addrevisioncb(self, rev, node)
2496 2494 finally:
2497 2495 destrevlog._lazydeltabase = oldlazydeltabase
2498 2496 destrevlog._deltabothparents = oldamd
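# Hedged usage sketch of the API above; ``srclog``, ``destlog`` and
# ``tr`` are assumed to be an existing revlog, an empty destination
# revlog and an open transaction (all three names hypothetical):

def _recomputealldeltas(srclog, destlog, tr):
    # the slowest but most thorough policy: every delta is recomputed
    # during the copy, e.g. after changing the delta algorithm
    srclog.clone(tr, destlog, deltareuse=srclog.DELTAREUSENEVER)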
@@ -1,940 +1,938 b''
1 1 # revlogdeltas.py - Logic around delta computation for revlog
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2018 Octobus <contact@octobus.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 """Helper class to compute deltas stored inside revlogs"""
9 9
10 10 from __future__ import absolute_import
11 11
12 12 import collections
13 13 import heapq
14 14 import struct
15 15
16 16 # import stuff from node for others to import from revlog
17 17 from ..node import (
18 18 nullrev,
19 19 )
20 20 from ..i18n import _
21 21
22 22 from .constants import (
23 23 REVIDX_ISCENSORED,
24 24 REVIDX_RAWTEXT_CHANGING_FLAGS,
25 25 )
26 26
27 27 from ..thirdparty import (
28 28 attr,
29 29 )
30 30
31 31 from .. import (
32 32 error,
33 33 mdiff,
34 34 )
35 35
36 CensoredNodeError = error.CensoredNodeError
37
38 36 # maximum <delta-chain-data>/<revision-text-length> ratio
39 37 LIMIT_DELTA2TEXT = 2
40 38
41 39 class _testrevlog(object):
42 40 """minimalist fake revlog to use in doctests"""
43 41
44 42 def __init__(self, data, density=0.5, mingap=0):
45 43         """data is a list of revision payload boundaries"""
46 44 self._data = data
47 45 self._srdensitythreshold = density
48 46 self._srmingapsize = mingap
49 47
50 48 def start(self, rev):
51 49 if rev == 0:
52 50 return 0
53 51 return self._data[rev - 1]
54 52
55 53 def end(self, rev):
56 54 return self._data[rev]
57 55
58 56 def length(self, rev):
59 57 return self.end(rev) - self.start(rev)
60 58
61 59 def __len__(self):
62 60 return len(self._data)
63 61
64 62 def slicechunk(revlog, revs, deltainfo=None, targetsize=None):
65 63 """slice revs to reduce the amount of unrelated data to be read from disk.
66 64
67 65     ``revs`` is sliced into groups that should be read in one go.
68 66 Assume that revs are sorted.
69 67
70 68 The initial chunk is sliced until the overall density (payload/chunks-span
71 69 ratio) is above `revlog._srdensitythreshold`. No gap smaller than
72 70 `revlog._srmingapsize` is skipped.
73 71
74 72     If `targetsize` is set, no chunk larger than `targetsize` will be yielded.
75 73     For consistency with other slicing choices, this limit won't go lower than
76 74     `revlog._srmingapsize`.
77 75
78 76     If individual revision chunks are larger than this limit, they will still
79 77     be yielded individually.
80 78
81 79 >>> revlog = _testrevlog([
82 80 ... 5, #00 (5)
83 81 ... 10, #01 (5)
84 82 ... 12, #02 (2)
85 83 ... 12, #03 (empty)
86 84 ... 27, #04 (15)
87 85 ... 31, #05 (4)
88 86 ... 31, #06 (empty)
89 87 ... 42, #07 (11)
90 88 ... 47, #08 (5)
91 89 ... 47, #09 (empty)
92 90 ... 48, #10 (1)
93 91 ... 51, #11 (3)
94 92 ... 74, #12 (23)
95 93 ... 85, #13 (11)
96 94 ... 86, #14 (1)
97 95 ... 91, #15 (5)
98 96 ... ])
99 97
100 98 >>> list(slicechunk(revlog, list(range(16))))
101 99 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
102 100 >>> list(slicechunk(revlog, [0, 15]))
103 101 [[0], [15]]
104 102 >>> list(slicechunk(revlog, [0, 11, 15]))
105 103 [[0], [11], [15]]
106 104 >>> list(slicechunk(revlog, [0, 11, 13, 15]))
107 105 [[0], [11, 13, 15]]
108 106 >>> list(slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
109 107 [[1, 2], [5, 8, 10, 11], [14]]
110 108
111 109 Slicing with a maximum chunk size
112 110 >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
113 111 [[0], [11], [13], [15]]
114 112 >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
115 113 [[0], [11], [13, 15]]
116 114 """
117 115 if targetsize is not None:
118 116 targetsize = max(targetsize, revlog._srmingapsize)
119 117 # targetsize should not be specified when evaluating delta candidates:
120 118 # * targetsize is used to ensure we stay within specification when reading,
121 119 # * deltainfo is used to pick are good delta chain when writing.
122 120 if not (deltainfo is None or targetsize is None):
123 121 msg = 'cannot use `targetsize` with a `deltainfo`'
124 122 raise error.ProgrammingError(msg)
125 123 for chunk in _slicechunktodensity(revlog, revs,
126 124 deltainfo,
127 125 revlog._srdensitythreshold,
128 126 revlog._srmingapsize):
129 127 for subchunk in _slicechunktosize(revlog, chunk, targetsize):
130 128 yield subchunk
131 129
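# The density arithmetic behind the doctest
# ``slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14])`` above, spelled out:
# those revs of the sample revlog span bytes 5..86 on disk but carry only
# 21 bytes of payload, so the group falls below the 0.5 default threshold
# and gets sliced.

payload = 5 + 2 + 0 + 4 + 5 + 1 + 3 + 1  # lengths of revs 1,2,3,5,8,10,11,14
span = 86 - 5                            # end(14) - start(1)
assert (payload, span) == (21, 81)
assert payload / float(span) < 0.5       # density ~0.26: slicing kicks in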
132 130 def _slicechunktosize(revlog, revs, targetsize=None):
133 131 """slice revs to match the target size
134 132
135 133     This is intended to be used on chunks that density slicing selected but
136 134     that are still too large compared to the read guarantee of the revlog. This
137 135     might happen when the "minimal gap size" interrupted the slicing or when
138 136     chains are built in a way that creates large blocks next to each other.
139 137
140 138 >>> revlog = _testrevlog([
141 139 ... 3, #0 (3)
142 140 ... 5, #1 (2)
143 141 ... 6, #2 (1)
144 142 ... 8, #3 (2)
145 143 ... 8, #4 (empty)
146 144 ... 11, #5 (3)
147 145 ... 12, #6 (1)
148 146 ... 13, #7 (1)
149 147 ... 14, #8 (1)
150 148 ... ])
151 149
152 150 Cases where chunk is already small enough
153 151 >>> list(_slicechunktosize(revlog, [0], 3))
154 152 [[0]]
155 153 >>> list(_slicechunktosize(revlog, [6, 7], 3))
156 154 [[6, 7]]
157 155 >>> list(_slicechunktosize(revlog, [0], None))
158 156 [[0]]
159 157 >>> list(_slicechunktosize(revlog, [6, 7], None))
160 158 [[6, 7]]
161 159
162 160 cases where we need actual slicing
163 161 >>> list(_slicechunktosize(revlog, [0, 1], 3))
164 162 [[0], [1]]
165 163 >>> list(_slicechunktosize(revlog, [1, 3], 3))
166 164 [[1], [3]]
167 165 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
168 166 [[1, 2], [3]]
169 167 >>> list(_slicechunktosize(revlog, [3, 5], 3))
170 168 [[3], [5]]
171 169 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
172 170 [[3], [5]]
173 171 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
174 172 [[5], [6, 7, 8]]
175 173 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
176 174 [[0], [1, 2], [3], [5], [6, 7, 8]]
177 175
178 176 Case with too large individual chunk (must return valid chunk)
179 177 >>> list(_slicechunktosize(revlog, [0, 1], 2))
180 178 [[0], [1]]
181 179 >>> list(_slicechunktosize(revlog, [1, 3], 1))
182 180 [[1], [3]]
183 181 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
184 182 [[3], [5]]
185 183 """
186 184 assert targetsize is None or 0 <= targetsize
187 185 if targetsize is None or segmentspan(revlog, revs) <= targetsize:
188 186 yield revs
189 187 return
190 188
191 189 startrevidx = 0
192 190 startdata = revlog.start(revs[0])
193 191 endrevidx = 0
194 192 iterrevs = enumerate(revs)
195 193 next(iterrevs) # skip first rev.
196 194 for idx, r in iterrevs:
197 195 span = revlog.end(r) - startdata
198 196 if span <= targetsize:
199 197 endrevidx = idx
200 198 else:
201 199 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
202 200 if chunk:
203 201 yield chunk
204 202 startrevidx = idx
205 203 startdata = revlog.start(r)
206 204 endrevidx = idx
207 205 yield _trimchunk(revlog, revs, startrevidx)
208 206
209 207 def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
210 208 mingapsize=0):
211 209 """slice revs to reduce the amount of unrelated data to be read from disk.
212 210
213 211 ``revs`` is sliced into groups that should be read in one time.
214 212     ``revs`` is sliced into groups that should be read in one go.
215 213
216 214 ``deltainfo`` is a _deltainfo instance of a revision that we would append
217 215 to the top of the revlog.
218 216
219 217 The initial chunk is sliced until the overall density (payload/chunks-span
220 218 ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
221 219 skipped.
222 220
223 221 >>> revlog = _testrevlog([
224 222 ... 5, #00 (5)
225 223 ... 10, #01 (5)
226 224 ... 12, #02 (2)
227 225 ... 12, #03 (empty)
228 226 ... 27, #04 (15)
229 227 ... 31, #05 (4)
230 228 ... 31, #06 (empty)
231 229 ... 42, #07 (11)
232 230 ... 47, #08 (5)
233 231 ... 47, #09 (empty)
234 232 ... 48, #10 (1)
235 233 ... 51, #11 (3)
236 234 ... 74, #12 (23)
237 235 ... 85, #13 (11)
238 236 ... 86, #14 (1)
239 237 ... 91, #15 (5)
240 238 ... ])
241 239
242 240 >>> list(_slicechunktodensity(revlog, list(range(16))))
243 241 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
244 242 >>> list(_slicechunktodensity(revlog, [0, 15]))
245 243 [[0], [15]]
246 244 >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
247 245 [[0], [11], [15]]
248 246 >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
249 247 [[0], [11, 13, 15]]
250 248 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
251 249 [[1, 2], [5, 8, 10, 11], [14]]
252 250 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
253 251 ... mingapsize=20))
254 252 [[1, 2, 3, 5, 8, 10, 11], [14]]
255 253 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
256 254 ... targetdensity=0.95))
257 255 [[1, 2], [5], [8, 10, 11], [14]]
258 256 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
259 257 ... targetdensity=0.95, mingapsize=12))
260 258 [[1, 2], [5, 8, 10, 11], [14]]
261 259 """
262 260 start = revlog.start
263 261 length = revlog.length
264 262
265 263 if len(revs) <= 1:
266 264 yield revs
267 265 return
268 266
269 267 nextrev = len(revlog)
270 268 nextoffset = revlog.end(nextrev - 1)
271 269
272 270 if deltainfo is None:
273 271 deltachainspan = segmentspan(revlog, revs)
274 272 chainpayload = sum(length(r) for r in revs)
275 273 else:
276 274 deltachainspan = deltainfo.distance
277 275 chainpayload = deltainfo.compresseddeltalen
278 276
279 277 if deltachainspan < mingapsize:
280 278 yield revs
281 279 return
282 280
283 281 readdata = deltachainspan
284 282
285 283 if deltachainspan:
286 284 density = chainpayload / float(deltachainspan)
287 285 else:
288 286 density = 1.0
289 287
290 288 if density >= targetdensity:
291 289 yield revs
292 290 return
293 291
294 292 if deltainfo is not None and deltainfo.deltalen:
295 293 revs = list(revs)
296 294 revs.append(nextrev)
297 295
298 296 # Store the gaps in a heap to have them sorted by decreasing size
299 297 gapsheap = []
300 298 heapq.heapify(gapsheap)
301 299 prevend = None
302 300 for i, rev in enumerate(revs):
303 301 if rev < nextrev:
304 302 revstart = start(rev)
305 303 revlen = length(rev)
306 304 else:
307 305 revstart = nextoffset
308 306 revlen = deltainfo.deltalen
309 307
310 308 # Skip empty revisions to form larger holes
311 309 if revlen == 0:
312 310 continue
313 311
314 312 if prevend is not None:
315 313 gapsize = revstart - prevend
316 314 # only consider holes that are large enough
317 315 if gapsize > mingapsize:
318 316 heapq.heappush(gapsheap, (-gapsize, i))
319 317
320 318 prevend = revstart + revlen
321 319
322 320 # Collect the indices of the largest holes until the density is acceptable
323 321 indicesheap = []
324 322 heapq.heapify(indicesheap)
325 323 while gapsheap and density < targetdensity:
326 324 oppgapsize, gapidx = heapq.heappop(gapsheap)
327 325
328 326 heapq.heappush(indicesheap, gapidx)
329 327
330 328 # the gap sizes are stored as negatives to be sorted decreasingly
331 329 # by the heap
332 330 readdata -= (-oppgapsize)
333 331 if readdata > 0:
334 332 density = chainpayload / float(readdata)
335 333 else:
336 334 density = 1.0
337 335
338 336 # Cut the revs at collected indices
339 337 previdx = 0
340 338 while indicesheap:
341 339 idx = heapq.heappop(indicesheap)
342 340
343 341 chunk = _trimchunk(revlog, revs, previdx, idx)
344 342 if chunk:
345 343 yield chunk
346 344
347 345 previdx = idx
348 346
349 347 chunk = _trimchunk(revlog, revs, previdx)
350 348 if chunk:
351 349 yield chunk
352 350
353 351 def _trimchunk(revlog, revs, startidx, endidx=None):
354 352 """returns revs[startidx:endidx] without empty trailing revs
355 353
356 354 Doctest Setup
357 355 >>> revlog = _testrevlog([
358 356 ... 5, #0
359 357 ... 10, #1
360 358 ... 12, #2
361 359 ... 12, #3 (empty)
362 360 ... 17, #4
363 361 ... 21, #5
364 362 ... 21, #6 (empty)
365 363 ... ])
366 364
367 365 Contiguous cases:
368 366 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
369 367 [0, 1, 2, 3, 4, 5]
370 368 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
371 369 [0, 1, 2, 3, 4]
372 370 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
373 371 [0, 1, 2]
374 372 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
375 373 [2]
376 374 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
377 375 [3, 4, 5]
378 376 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
379 377 [3, 4]
380 378
381 379 Discontiguous cases:
382 380 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
383 381 [1, 3, 5]
384 382 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
385 383 [1]
386 384 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
387 385 [3, 5]
388 386 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
389 387 [3, 5]
390 388 """
391 389 length = revlog.length
392 390
393 391 if endidx is None:
394 392 endidx = len(revs)
395 393
396 394     # If we have a non-empty delta candidate, there is nothing to trim
397 395 if revs[endidx - 1] < len(revlog):
398 396 # Trim empty revs at the end, except the very first revision of a chain
399 397 while (endidx > 1
400 398 and endidx > startidx
401 399 and length(revs[endidx - 1]) == 0):
402 400 endidx -= 1
403 401
404 402 return revs[startidx:endidx]
405 403
406 404 def segmentspan(revlog, revs, deltainfo=None):
407 405 """Get the byte span of a segment of revisions
408 406
409 407 revs is a sorted array of revision numbers
410 408
411 409 >>> revlog = _testrevlog([
412 410 ... 5, #0
413 411 ... 10, #1
414 412 ... 12, #2
415 413 ... 12, #3 (empty)
416 414 ... 17, #4
417 415 ... ])
418 416
419 417 >>> segmentspan(revlog, [0, 1, 2, 3, 4])
420 418 17
421 419 >>> segmentspan(revlog, [0, 4])
422 420 17
423 421 >>> segmentspan(revlog, [3, 4])
424 422 5
425 423 >>> segmentspan(revlog, [1, 2, 3,])
426 424 7
427 425 >>> segmentspan(revlog, [1, 3])
428 426 7
429 427 """
430 428 if not revs:
431 429 return 0
432 430 if deltainfo is not None and len(revlog) <= revs[-1]:
433 431 if len(revs) == 1:
434 432 return deltainfo.deltalen
435 433 offset = revlog.end(len(revlog) - 1)
436 434 end = deltainfo.deltalen + offset
437 435 else:
438 436 end = revlog.end(revs[-1])
439 437 return end - revlog.start(revs[0])
440 438
441 439 def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
442 440 """build full text from a (base, delta) pair and other metadata"""
443 441 # special case deltas which replace entire base; no need to decode
444 442 # base revision. this neatly avoids censored bases, which throw when
445 443 # they're decoded.
446 444 hlen = struct.calcsize(">lll")
447 445 if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
448 446 len(delta) - hlen):
449 447 fulltext = delta[hlen:]
450 448 else:
451 449 # deltabase is rawtext before changed by flag processors, which is
452 450 # equivalent to non-raw text
453 451 basetext = revlog.revision(baserev, _df=fh, raw=False)
454 452 fulltext = mdiff.patch(basetext, delta)
455 453
456 454 try:
457 455 res = revlog._processflags(fulltext, flags, 'read', raw=True)
458 456 fulltext, validatehash = res
459 457 if validatehash:
460 458 revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
461 459 if flags & REVIDX_ISCENSORED:
462 460 raise error.RevlogError(_('node %s is not censored') % expectednode)
463 except CensoredNodeError:
461 except error.CensoredNodeError:
464 462 # must pass the censored index flag to add censored revisions
465 463 if not flags & REVIDX_ISCENSORED:
466 464 raise
467 465 return fulltext
468 466
469 467 @attr.s(slots=True, frozen=True)
470 468 class _deltainfo(object):
471 469 distance = attr.ib()
472 470 deltalen = attr.ib()
473 471 data = attr.ib()
474 472 base = attr.ib()
475 473 chainbase = attr.ib()
476 474 chainlen = attr.ib()
477 475 compresseddeltalen = attr.ib()
478 476 snapshotdepth = attr.ib()
479 477
480 478 def isgooddeltainfo(revlog, deltainfo, revinfo):
481 479 """Returns True if the given delta is good. Good means that it is within
482 480 the disk span, disk size, and chain length bounds that we know to be
483 481 performant."""
484 482 if deltainfo is None:
485 483 return False
486 484
487 485 # - 'deltainfo.distance' is the distance from the base revision --
488 486 # bounding it limits the amount of I/O we need to do.
489 487 # - 'deltainfo.compresseddeltalen' is the sum of the total size of
490 488 # deltas we need to apply -- bounding it limits the amount of CPU
491 489 # we consume.
492 490
493 491 if revlog._sparserevlog:
494 492 # As sparse-read will be used, we can consider that the distance,
495 493 # instead of being the span of the whole chunk,
496 494 # is the span of the largest read chunk
497 495 base = deltainfo.base
498 496
499 497 if base != nullrev:
500 498 deltachain = revlog._deltachain(base)[0]
501 499 else:
502 500 deltachain = []
503 501
504 502 # search for the first non-snapshot revision
505 503 for idx, r in enumerate(deltachain):
506 504 if not revlog.issnapshot(r):
507 505 break
508 506 deltachain = deltachain[idx:]
509 507 chunks = slicechunk(revlog, deltachain, deltainfo)
510 508 all_span = [segmentspan(revlog, revs, deltainfo)
511 509 for revs in chunks]
512 510 distance = max(all_span)
513 511 else:
514 512 distance = deltainfo.distance
515 513
516 514 textlen = revinfo.textlen
517 515 defaultmax = textlen * 4
518 516 maxdist = revlog._maxdeltachainspan
519 517 if not maxdist:
520 518 maxdist = distance # ensure the conditional pass
521 519 maxdist = max(maxdist, defaultmax)
522 520 if revlog._sparserevlog and maxdist < revlog._srmingapsize:
523 521         # In multiple places, we are ignoring irrelevant data ranges below a
524 522         # certain size. We also apply this tradeoff here and relax the span
525 523         # constraint for small enough content.
526 524 maxdist = revlog._srmingapsize
527 525
528 526 # Bad delta from read span:
529 527 #
530 528 # If the span of data read is larger than the maximum allowed.
531 529 if maxdist < distance:
532 530 return False
533 531
534 532 # Bad delta from new delta size:
535 533 #
536 534 # If the delta size is larger than the target text, storing the
537 535 # delta will be inefficient.
538 536 if textlen < deltainfo.deltalen:
539 537 return False
540 538
541 539 # Bad delta from cumulated payload size:
542 540 #
543 541     # If the sum of deltas gets larger than K * the target text length.
544 542 if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
545 543 return False
546 544
547 545 # Bad delta from chain length:
548 546 #
549 547     # If the number of deltas in the chain gets too high.
550 548 if (revlog._maxchainlen
551 549 and revlog._maxchainlen < deltainfo.chainlen):
552 550 return False
553 551
554 552 # bad delta from intermediate snapshot size limit
555 553 #
556 554         # If an intermediate snapshot size is higher than the limit. The
557 555         # limit exists to prevent endless chains of intermediate deltas from
558 556         # being created.
559 557 if (deltainfo.snapshotdepth is not None and
560 558 (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen):
561 559 return False
562 560
563 561 # bad delta if new intermediate snapshot is larger than the previous
564 562 # snapshot
565 563 if (deltainfo.snapshotdepth
566 564 and revlog.length(deltainfo.base) < deltainfo.deltalen):
567 565 return False
568 566
569 567 return True
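# Illustrative numbers for the two size bounds above (all values made
# up; LIMIT_DELTA2TEXT is the module constant defined earlier):

def _toysizebounds(textlen, deltalen, compresseddeltalen):
    if textlen < deltalen:
        return False                 # the delta outgrew the revision text
    if textlen * LIMIT_DELTA2TEXT < compresseddeltalen:
        return False                 # cumulated chain payload too large
    return True

assert _toysizebounds(1000, 600, 1500)        # both bounds respected
assert not _toysizebounds(1000, 1200, 1200)   # delta bigger than the text
assert not _toysizebounds(1000, 600, 2500)    # chain payload over 2x text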
570 568
571 569 def _candidategroups(revlog, textlen, p1, p2, cachedelta):
572 570 """Provides group of revision to be tested as delta base
573 571
574 572     This top level function focuses on emitting groups with unique and
575 573     worthwhile content. See _rawgroups for details about the group order.
576 574 """
577 575 # should we try to build a delta?
578 576 if not (len(revlog) and revlog._storedeltachains):
579 577 yield None
580 578 return
581 579
582 580 deltalength = revlog.length
583 581 deltaparent = revlog.deltaparent
584 582 good = None
585 583
586 584 deltas_limit = textlen * LIMIT_DELTA2TEXT
587 585
588 586 tested = set([nullrev])
589 587 candidates = _refinedgroups(revlog, p1, p2, cachedelta)
590 588 while True:
591 589 temptative = candidates.send(good)
592 590 if temptative is None:
593 591 break
594 592 group = []
595 593 for rev in temptative:
596 594 # skip over empty delta (no need to include them in a chain)
597 595 while not (rev == nullrev or rev in tested or deltalength(rev)):
598 596 tested.add(rev)
599 597 rev = deltaparent(rev)
600 598             # filter out revisions we tested already
601 599 if rev in tested:
602 600 continue
603 601 tested.add(rev)
604 602             # filter out delta bases that will never produce a good delta
605 603 if deltas_limit < revlog.length(rev):
606 604 continue
607 605 # no need to try a delta against nullrev, this will be done as a
608 606 # last resort.
609 607 if rev == nullrev:
610 608 continue
611 609 # no delta for rawtext-changing revs (see "candelta" for why)
612 610 if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
613 611 continue
614 612 group.append(rev)
615 613 if group:
616 614 # XXX: in the sparse revlog case, group can become large,
617 615             # impacting performance. Some bounding or slicing mechanism
618 616             # would help to reduce this impact.
619 617 good = yield tuple(group)
620 618 yield None
621 619
622 620 def _findsnapshots(revlog, cache, start_rev):
623 621 """find snapshot from start_rev to tip"""
624 622 deltaparent = revlog.deltaparent
625 623 issnapshot = revlog.issnapshot
626 624 for rev in revlog.revs(start_rev):
627 625 if issnapshot(rev):
628 626 cache[deltaparent(rev)].append(rev)
629 627
630 628 def _refinedgroups(revlog, p1, p2, cachedelta):
631 629 good = None
632 630     # First we try to reuse the delta contained in the bundle.
633 631 # (or from the source revlog)
634 632 #
635 633 # This logic only applies to general delta repositories and can be disabled
636 634 # through configuration. Disabling reuse source delta is useful when
637 635 # we want to make sure we recomputed "optimal" deltas.
638 636 if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
639 637 # Assume what we received from the server is a good choice
640 638         # the delta builder will reuse the cache
641 639 good = yield (cachedelta[0],)
642 640 if good is not None:
643 641 yield None
644 642 return
645 643 for candidates in _rawgroups(revlog, p1, p2, cachedelta):
646 644 good = yield candidates
647 645 if good is not None:
648 646 break
649 647
650 648 # if we have a refinable value, try to refine it
651 649 if good is not None and good not in (p1, p2) and revlog.issnapshot(good):
652 650 # refine snapshot down
653 651 previous = None
654 652 while previous != good:
655 653 previous = good
656 654 base = revlog.deltaparent(good)
657 655 if base == nullrev:
658 656 break
659 657 good = yield (base,)
660 658 # refine snapshot up
661 659 #
662 660 # XXX the _findsnapshots call can be expensive and is "duplicated" with
663 661 # the one done in `_rawgroups`. Once we start working on performance,
664 662 # we should make the two code paths share this computation.
665 663 snapshots = collections.defaultdict(list)
666 664 _findsnapshots(revlog, snapshots, good + 1)
667 665 previous = None
668 666 while good != previous:
669 667 previous = good
670 668 children = tuple(sorted(c for c in snapshots[good]))
671 669 good = yield children
672 670
673 671 # we have found nothing
674 672 yield None
675 673
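# [editorial sketch] The "refine snapshot down" pass in isolation, against a
# toy parent map (hypothetical; the real code asks the revlog, and `accept`
# stands in for `good = yield (base,)`):
_parentmap = {2: 1, 1: 0, 0: nullrev}       # rev -> delta parent

def _refinedown(good, accept):
    # walk toward the root while the consumer keeps accepting the parent
    previous = None
    while previous != good:
        previous = good
        base = _parentmap[good]
        if base == nullrev:
            break
        if accept(base):
            good = base
    return good

# starting from snapshot 2, the consumer accepts 1 but rejects 0
assert _refinedown(2, lambda rev: rev >= 1) == 1
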
676 674 def _rawgroups(revlog, p1, p2, cachedelta):
677 675 """Provides groups of revisions to be tested as delta bases
678 676
679 677 This lower-level function focuses on emitting theoretically interesting
680 678 deltas, without looking at any practical details.
681 679
682 680 The group order aims at providing fast or small candidates first.
683 681 """
684 682 gdelta = revlog._generaldelta
685 683 sparse = revlog._sparserevlog
686 684 curr = len(revlog)
687 685 prev = curr - 1
688 686 deltachain = lambda rev: revlog._deltachain(rev)[0]
689 687
690 688 if gdelta:
691 689 # exclude already-tested lazy bases, if any
692 690 parents = [p for p in (p1, p2) if p != nullrev]
693 691
694 692 if not revlog._deltabothparents and len(parents) == 2:
695 693 parents.sort()
696 694 # To minimize the chance of having to build a fulltext,
697 695 # pick first whichever parent is closest to us (max rev)
698 696 yield (parents[1],)
699 697 # then the other one (min rev) if the first did not fit
700 698 yield (parents[0],)
701 699 elif len(parents) > 0:
702 700 # Test all parents (1 or 2), and keep the best candidate
703 701 yield parents
704 702
705 703 if sparse and parents:
706 704 snapshots = collections.defaultdict(list) # map: base-rev: snapshot-rev
707 705 # See if we can use an existing snapshot in the parent chains as a
708 706 # base for a new intermediate snapshot
709 707 #
710 708 # search for snapshots in the parents' delta chains
711 709 # map: snapshot-level: snapshot-rev
712 710 parents_snaps = collections.defaultdict(set)
713 711 candidate_chains = [deltachain(p) for p in parents]
714 712 for chain in candidate_chains:
715 713 for idx, s in enumerate(chain):
716 714 if not revlog.issnapshot(s):
717 715 break
718 716 parents_snaps[idx].add(s)
719 717 snapfloor = min(parents_snaps[0]) + 1
720 718 _findsnapshots(revlog, snapshots, snapfloor)
721 719 # search for the highest "unrelated" revision
722 720 #
723 721 # Adding snapshots used by "unrelated" revisions increases the odds we
724 722 # reuse an independent, yet better, snapshot chain.
725 723 #
726 724 # XXX instead of building a set of revisions, we could lazily enumerate
727 725 # over the chains. That would be more efficient; however, we stick to
728 726 # simple code for now.
729 727 all_revs = set()
730 728 for chain in candidate_chains:
731 729 all_revs.update(chain)
732 730 other = None
733 731 for r in revlog.revs(prev, snapfloor):
734 732 if r not in all_revs:
735 733 other = r
736 734 break
737 735 if other is not None:
738 736 # To avoid unfair competition, we won't use unrelated intermediate
739 737 # snapshots that are deeper than the ones from the parent delta
740 738 # chain.
741 739 max_depth = max(parents_snaps.keys())
742 740 chain = deltachain(other)
743 741 for idx, s in enumerate(chain):
744 742 if s < snapfloor:
745 743 continue
746 744 if max_depth < idx:
747 745 break
748 746 if not revlog.issnapshot(s):
749 747 break
750 748 parents_snaps[idx].add(s)
751 749 # Test them as possible intermediate snapshot bases.
752 750 # We test them from highest to lowest level. High-level ones are more
753 751 # likely to result in small deltas.
754 752 floor = None
755 753 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
756 754 siblings = set()
757 755 for s in snaps:
758 756 siblings.update(snapshots[s])
759 757 # Before considering making a new intermediate snapshot, we check
760 758 # if an existing snapshot, a child of the base we consider, would
761 759 # be suitable.
762 760 #
763 761 # It gives a chance to reuse a delta chain "unrelated" to the
764 762 # current revision instead of starting our own. Without such
765 763 # reuse, topological branches would keep reopening new chains,
766 764 # creating more and more snapshots as the repository grows.
767 765
768 766 if floor is not None:
769 767 # We only do this for siblings created after the one in our
770 768 # parent's delta chain. Those created before have less chance
771 769 # of being a valid base, since our ancestors had to create a new
772 770 # snapshot.
773 771 siblings = [r for r in siblings if floor < r]
774 772 yield tuple(sorted(siblings))
775 773 # then test the bases from our parents' delta chains.
776 774 yield tuple(sorted(snaps))
777 775 floor = min(snaps)
778 776 # No suitable base found in the parent chain; search whether any full
779 777 # snapshot emitted since the parents' base would be a suitable base for
780 778 # an intermediate snapshot.
781 779 #
782 780 # It gives a chance to reuse a delta chain unrelated to the current
783 781 # revision instead of starting our own. Without such reuse,
784 782 # topological branches would keep reopening new full chains, creating
785 783 # more and more snapshots as the repository grows.
786 784 yield tuple(snapshots[nullrev])
787 785
788 786 if not sparse:
789 787 # the other approaches failed; try against prev to hopefully save
790 788 # us a fulltext.
791 789 yield (prev,)
792 790
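# [editorial note] Reading the function above, a sparse revlog tries its
# candidate groups in this order:
#   1. the closest parent (max rev), then the other parent;
#   2. per snapshot level, deepest first: existing sibling snapshots, then
#      the snapshots from the parents' chains themselves;
#   3. the full snapshots (children of nullrev).
# A non-sparse general-delta revlog instead falls back to `prev` as a last
# attempt before the caller gives up and stores a full snapshot.
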
793 791 class deltacomputer(object):
794 792 def __init__(self, revlog):
795 793 self.revlog = revlog
796 794
797 795 def buildtext(self, revinfo, fh):
798 796 """Builds a fulltext version of a revision
799 797
800 798 revinfo: _revisioninfo instance that contains all needed info
801 799 fh: file handle to either the .i or the .d revlog file,
802 800 depending on whether it is inlined or not
803 801 """
804 802 btext = revinfo.btext
805 803 if btext[0] is not None:
806 804 return btext[0]
807 805
808 806 revlog = self.revlog
809 807 cachedelta = revinfo.cachedelta
810 808 baserev = cachedelta[0]
811 809 delta = cachedelta[1]
812 810
813 811 fulltext = btext[0] = _textfromdelta(fh, revlog, baserev, delta,
814 812 revinfo.p1, revinfo.p2,
815 813 revinfo.flags, revinfo.node)
816 814 return fulltext
817 815
818 816 def _builddeltadiff(self, base, revinfo, fh):
819 817 revlog = self.revlog
820 818 t = self.buildtext(revinfo, fh)
821 819 if revlog.iscensored(base):
822 820 # deltas based on a censored revision must replace the
823 821 # full content in one patch, so the delta works everywhere
824 822 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
825 823 delta = header + t
826 824 else:
827 825 ptext = revlog.revision(base, _df=fh, raw=True)
828 826 delta = mdiff.textdiff(ptext, t)
829 827
830 828 return delta
831 829
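# [editorial sketch] Shape of the full-replacement delta used for censored
# bases. A Mercurial binary delta is a sequence of hunks, each a big-endian
# (start, end, length) int32 header followed by `length` bytes of new data;
# a single hunk covering the whole base text therefore patches correctly
# regardless of the (censored) base content. Illustrative helper, not the
# mdiff API:
def _fullreplacementdelta(baselen, newtext):
    header = struct.pack(">lll", 0, baselen, len(newtext))
    return header + newtext

_delta = _fullreplacementdelta(120, b"uncensored fulltext")
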
832 830 def _builddeltainfo(self, revinfo, base, fh):
833 831 # can we use the cached delta?
834 832 delta = None
835 833 if revinfo.cachedelta:
836 834 cachebase, cachediff = revinfo.cachedelta
837 835 # check if the diff still applies
838 836 currentbase = cachebase
839 837 while (currentbase != nullrev
840 838 and currentbase != base
841 839 and self.revlog.length(currentbase) == 0):
842 840 currentbase = self.revlog.deltaparent(currentbase)
843 841 if currentbase == base:
844 842 delta = revinfo.cachedelta[1]
845 843 if delta is None:
846 844 delta = self._builddeltadiff(base, revinfo, fh)
847 845 revlog = self.revlog
848 846 header, data = revlog.compress(delta)
849 847 deltalen = len(header) + len(data)
850 848 chainbase = revlog.chainbase(base)
851 849 offset = revlog.end(len(revlog) - 1)
852 850 dist = deltalen + offset - revlog.start(chainbase)
853 851 if revlog._generaldelta:
854 852 deltabase = base
855 853 else:
856 854 deltabase = chainbase
857 855 chainlen, compresseddeltalen = revlog._chaininfo(base)
858 856 chainlen += 1
859 857 compresseddeltalen += deltalen
860 858
861 859 revlog = self.revlog
862 860 snapshotdepth = None
863 861 if deltabase == nullrev:
864 862 snapshotdepth = 0
865 863 elif revlog._sparserevlog and revlog.issnapshot(deltabase):
866 864 # A delta chain should always be one full snapshot,
867 865 # zero or more semi-snapshots, and zero or more deltas
868 866 p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
869 867 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
870 868 snapshotdepth = len(revlog._deltachain(deltabase)[0])
871 869
872 870 return _deltainfo(dist, deltalen, (header, data), deltabase,
873 871 chainbase, chainlen, compresseddeltalen,
874 872 snapshotdepth)
875 873
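# [editorial note] On the snapshot-depth rule above, with a hypothetical
# chain nullrev <- full <- snap1 <- snap2: choosing deltabase = snap2
# (when it is not p1/p2) gives _deltachain(snap2)[0] == [full, snap1,
# snap2], hence snapshotdepth = 3, which isgooddeltainfo later uses to
# enforce the textlen >> 3 size budget.
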
876 874 def _fullsnapshotinfo(self, fh, revinfo):
877 875 curr = len(self.revlog)
878 876 rawtext = self.buildtext(revinfo, fh)
879 877 data = self.revlog.compress(rawtext)
880 878 compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
881 879 deltabase = chainbase = curr
882 880 snapshotdepth = 0
883 881 chainlen = 1
884 882
885 883 return _deltainfo(dist, deltalen, data, deltabase,
886 884 chainbase, chainlen, compresseddeltalen,
887 885 snapshotdepth)
888 886
889 887 def finddeltainfo(self, revinfo, fh):
890 888 """Find an acceptable delta against a candidate revision
891 889
892 890 revinfo: information about the revision (instance of _revisioninfo)
893 891 fh: file handle to either the .i or the .d revlog file,
894 892 depending on whether it is inlined or not
895 893
896 894 Returns delta info for the first acceptable candidate revision,
897 895 as ordered by _candidategroups.
898 896
899 897 If no suitable deltabase is found, we return delta info for a full
900 898 snapshot.
901 899 """
902 900 if not revinfo.textlen:
903 901 return self._fullsnapshotinfo(fh, revinfo)
904 902
905 903 # no delta for flag processor revision (see "candelta" for why)
906 904 # not calling candelta since only one revision needs testing, and to
907 905 # avoid the overhead of fetching flags again.
908 906 if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
909 907 return self._fullsnapshotinfo(fh, revinfo)
910 908
911 909 cachedelta = revinfo.cachedelta
912 910 p1 = revinfo.p1
913 911 p2 = revinfo.p2
914 912 revlog = self.revlog
915 913
916 914 deltainfo = None
917 915 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
918 916 groups = _candidategroups(self.revlog, revinfo.textlen,
919 917 p1r, p2r, cachedelta)
920 918 candidaterevs = next(groups)
921 919 while candidaterevs is not None:
922 920 nominateddeltas = []
923 921 if deltainfo is not None:
924 922 # if we already found a good delta,
925 923 # challenge it against refined candidates
926 924 nominateddeltas.append(deltainfo)
927 925 for candidaterev in candidaterevs:
928 926 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
929 927 if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
930 928 nominateddeltas.append(candidatedelta)
931 929 if nominateddeltas:
932 930 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
933 931 if deltainfo is not None:
934 932 candidaterevs = groups.send(deltainfo.base)
935 933 else:
936 934 candidaterevs = next(groups)
937 935
938 936 if deltainfo is None:
939 937 deltainfo = self._fullsnapshotinfo(fh, revinfo)
940 938 return deltainfo
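# [editorial sketch] The search loop above, condensed (illustrative
# pseudocode; `build` and `acceptable` stand in for _builddeltainfo and
# isgooddeltainfo):
#
#   groups = _candidategroups(revlog, textlen, p1r, p2r, cachedelta)
#   best = None
#   revs = next(groups)
#   while revs is not None:
#       contenders = ([best] if best else []) + \
#                    [d for d in map(build, revs) if acceptable(d)]
#       if contenders:
#           best = min(contenders, key=lambda d: d.deltalen)
#       revs = groups.send(best.base) if best else next(groups)
#   return best or fullsnapshot()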