##// END OF EJS Templates
revlog: skip over empty revision when looking for delta base...
Boris Feld -
r39118:8f83a953 default
parent child Browse files
Show More
@@ -1,2964 +1,2974
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import contextlib
18 18 import errno
19 19 import hashlib
20 20 import heapq
21 21 import os
22 22 import re
23 23 import struct
24 24 import zlib
25 25
26 26 # import stuff from node for others to import from revlog
27 27 from .node import (
28 28 bin,
29 29 hex,
30 30 nullid,
31 31 nullrev,
32 32 wdirfilenodeids,
33 33 wdirhex,
34 34 wdirid,
35 35 wdirrev,
36 36 )
37 37 from .i18n import _
38 38 from .thirdparty import (
39 39 attr,
40 40 )
41 41 from . import (
42 42 ancestor,
43 43 error,
44 44 mdiff,
45 45 policy,
46 46 pycompat,
47 47 templatefilters,
48 48 util,
49 49 )
50 50 from .utils import (
51 51 stringutil,
52 52 )
53 53
parsers = policy.importmod(r'parsers')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# revlog header flags (version number lives in the header low bits; the
# feature flags below occupy the high bits, cf. FLAG_* being >= 1 << 16)
REVLOGV0 = 0
REVLOGV1 = 1
# Dummy value until file format is finalized.
# Reminder: change the bounds check in revlog.__init__ when this is changed.
REVLOGV2 = 0xDEAD
FLAG_INLINE_DATA = (1 << 16)
FLAG_GENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
REVLOG_DEFAULT_FORMAT = REVLOGV1
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
# feature flags accepted by each on-disk format version
REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
REVLOGV2_FLAGS = REVLOGV1_FLAGS

# revlog index flags
REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
REVIDX_DEFAULT_FLAGS = 0
# stable order in which flags need to be processed and their processors applied
REVIDX_FLAGS_ORDER = [
    REVIDX_ISCENSORED,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
]
REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
# bitmark for flags that could cause rawdata content change
REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED

# max size of revlog with inline data
_maxinline = 131072  # 128 KiB
_chunksize = 1048576  # 1 MiB

# Exception classes re-exported here for the convenience of revlog callers
# (all defined in the ``error`` module).
RevlogError = error.RevlogError
LookupError = error.LookupError
AmbiguousPrefixLookupError = error.AmbiguousPrefixLookupError
CensoredNodeError = error.CensoredNodeError
ProgrammingError = error.ProgrammingError

# Store flag processors (cf. 'addflagprocessor()' to register)
_flagprocessors = {
    REVIDX_ISCENSORED: None,
}
102 102
_mdre = re.compile('\1\n')
def parsemeta(text):
    """Return (metadatadict, metadatasize) parsed from *text*.

    A metadata block, when present, is framed by "\\1\\n" markers at the
    very start of the raw text.  Returns (None, None) when no metadata
    block is found.
    """
    # text can be buffer, so we can't use .startswith or .index
    if text[:2] != '\1\n':
        return None, None
    end = _mdre.search(text, 2).start()
    metablock = text[2:end]
    meta = {}
    for line in metablock.splitlines():
        key, value = line.split(": ", 1)
        meta[key] = value
    # size includes both framing markers
    return meta, end + 2
116 116
def packmeta(meta, text):
    """Prepend the "\\1\\n"-framed serialization of *meta* to *text*.

    Keys are emitted in sorted order so the output is deterministic.
    """
    lines = ["%s: %s\n" % (key, value) for key, value in sorted(meta.items())]
    return "\1\n%s\1\n%s" % ("".join(lines), text)
121 121
def _censoredtext(text):
    """Return a truthy value when *text* carries "censored" metadata."""
    meta = parsemeta(text)[0]
    return meta and "censored" in meta
125 125
def addflagprocessor(flag, processor):
    """Register a flag processor on a revision data flag.

    Invariants:
    - ``flag`` must be declared in both REVIDX_KNOWN_FLAGS and
      REVIDX_FLAGS_ORDER (and in REVIDX_RAWTEXT_CHANGING_FLAGS when the
      processor can alter rawtext).
    - at most one processor may be registered per flag.
    - ``processor`` is a 3-tuple of functions (read, write, raw) with the
      following signatures:
      - (read)  f(self, rawtext) -> text, bool
      - (write) f(self, text) -> rawtext, bool
      - (raw)   f(self, rawtext) -> bool
      "text" is presented to the user while "rawtext" is what gets stored
      in revlog data.  The boolean returned by each transform says whether
      the returned text can be used for hash integrity checking: if "write"
      returns False, "text" is hashed; if it returns True, its "rawtext"
      result is hashed.  "read" and "write" usually return opposite
      booleans, and "raw" returns the same boolean as "write".

    Note: the 'raw' transform is used for changegroup generation and in
    some debug commands; there the boolean only indicates whether the
    contents can be used for hash integrity checks.
    """
    if not flag & REVIDX_KNOWN_FLAGS:
        raise ProgrammingError(
            _("cannot register processor on unknown flag '%#x'.") % (flag))
    if flag not in REVIDX_FLAGS_ORDER:
        raise ProgrammingError(
            _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag))
    if flag in _flagprocessors:
        raise error.Abort(
            _("cannot register multiple processors on flag '%#x'.") % (flag))
    _flagprocessors[flag] = processor
161 161
def getoffset(q):
    """Extract the byte offset packed in the high bits of index field *q*."""
    offset = q >> 16
    return int(offset)
164 164
def gettype(q):
    """Extract the 16-bit flag field packed in the low bits of *q*."""
    flags = q & 0xFFFF
    return int(flags)
167 167
def offset_type(offset, type):
    """Pack a byte *offset* and a flags value *type* into one index field.

    Raises ValueError when *type* carries bits outside REVIDX_KNOWN_FLAGS.
    """
    if type & ~REVIDX_KNOWN_FLAGS:
        raise ValueError('unknown revlog index flags')
    return int((int(offset) << 16) | type)
172 172
_nullhash = hashlib.sha1(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent node is null, p2 is null
    if p2 == nullid:
        # cloning the precomputed sha1 of nullid is faster than creating a
        # fresh hasher and feeding nullid again
        s = _nullhash.copy()
        s.update(p1)
    else:
        # neither parent is nullid: feed the parents in sorted order so the
        # result is independent of their ordering
        lo, hi = (p1, p2) if p1 < p2 else (p2, p1)
        s = hashlib.sha1(lo)
        s.update(hi)
    s.update(text)
    return s.digest()
199 199
200 200 class _testrevlog(object):
201 201 """minimalist fake revlog to use in doctests"""
202 202
203 203 def __init__(self, data, density=0.5, mingap=0):
204 204 """data is an list of revision payload boundaries"""
205 205 self._data = data
206 206 self._srdensitythreshold = density
207 207 self._srmingapsize = mingap
208 208
209 209 def start(self, rev):
210 210 if rev == 0:
211 211 return 0
212 212 return self._data[rev - 1]
213 213
214 214 def end(self, rev):
215 215 return self._data[rev]
216 216
217 217 def length(self, rev):
218 218 return self.end(rev) - self.start(rev)
219 219
220 220 def __len__(self):
221 221 return len(self._data)
222 222
223 223 def _trimchunk(revlog, revs, startidx, endidx=None):
224 224 """returns revs[startidx:endidx] without empty trailing revs
225 225
226 226 Doctest Setup
227 227 >>> revlog = _testrevlog([
228 228 ... 5, #0
229 229 ... 10, #1
230 230 ... 12, #2
231 231 ... 12, #3 (empty)
232 232 ... 17, #4
233 233 ... 21, #5
234 234 ... 21, #6 (empty)
235 235 ... ])
236 236
237 237 Contiguous cases:
238 238 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
239 239 [0, 1, 2, 3, 4, 5]
240 240 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
241 241 [0, 1, 2, 3, 4]
242 242 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
243 243 [0, 1, 2]
244 244 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
245 245 [2]
246 246 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
247 247 [3, 4, 5]
248 248 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
249 249 [3, 4]
250 250
251 251 Discontiguous cases:
252 252 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
253 253 [1, 3, 5]
254 254 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
255 255 [1]
256 256 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
257 257 [3, 5]
258 258 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
259 259 [3, 5]
260 260 """
261 261 length = revlog.length
262 262
263 263 if endidx is None:
264 264 endidx = len(revs)
265 265
266 266 # Trim empty revs at the end, but never the very first revision of a chain
267 267 while endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0:
268 268 endidx -= 1
269 269
270 270 return revs[startidx:endidx]
271 271
272 272 def _segmentspan(revlog, revs):
273 273 """Get the byte span of a segment of revisions
274 274
275 275 revs is a sorted array of revision numbers
276 276
277 277 >>> revlog = _testrevlog([
278 278 ... 5, #0
279 279 ... 10, #1
280 280 ... 12, #2
281 281 ... 12, #3 (empty)
282 282 ... 17, #4
283 283 ... ])
284 284
285 285 >>> _segmentspan(revlog, [0, 1, 2, 3, 4])
286 286 17
287 287 >>> _segmentspan(revlog, [0, 4])
288 288 17
289 289 >>> _segmentspan(revlog, [3, 4])
290 290 5
291 291 >>> _segmentspan(revlog, [1, 2, 3,])
292 292 7
293 293 >>> _segmentspan(revlog, [1, 3])
294 294 7
295 295 """
296 296 if not revs:
297 297 return 0
298 298 return revlog.end(revs[-1]) - revlog.start(revs[0])
299 299
def _slicechunk(revlog, revs, deltainfo=None, targetsize=None):
    """slice revs to reduce the amount of unrelated data to be read from disk.

    ``revs`` is sliced into groups that should be read in one time.
    Assume that revs are sorted.

    The initial chunk is sliced until the overall density (payload/chunks-span
    ratio) is above `revlog._srdensitythreshold`. No gap smaller than
    `revlog._srmingapsize` is skipped.

    If `targetsize` is set, no chunk larger than `targetsize` will be yield.
    For consistency with other slicing choices, this limit won't go lower than
    `revlog._srmingapsize`.

    If individual revision chunks are larger than this limit, they will still
    be raised individually.

    >>> revlog = _testrevlog([
    ...  5,  #00 (5)
    ...  10, #01 (5)
    ...  12, #02 (2)
    ...  12, #03 (empty)
    ...  27, #04 (15)
    ...  31, #05 (4)
    ...  31, #06 (empty)
    ...  42, #07 (11)
    ...  47, #08 (5)
    ...  47, #09 (empty)
    ...  48, #10 (1)
    ...  51, #11 (3)
    ...  74, #12 (23)
    ...  85, #13 (11)
    ...  86, #14 (1)
    ...  91, #15 (5)
    ... ])

    >>> list(_slicechunk(revlog, list(range(16))))
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
    >>> list(_slicechunk(revlog, [0, 15]))
    [[0], [15]]
    >>> list(_slicechunk(revlog, [0, 11, 15]))
    [[0], [11], [15]]
    >>> list(_slicechunk(revlog, [0, 11, 13, 15]))
    [[0], [11, 13, 15]]
    >>> list(_slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
    [[1, 2], [5, 8, 10, 11], [14]]

    Slicing with a maximum chunk size
    >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
    [[0], [11], [13], [15]]
    >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
    [[0], [11], [13, 15]]
    """
    if targetsize is not None:
        targetsize = max(targetsize, revlog._srmingapsize)
    # targetsize should not be specified when evaluating delta candidates:
    # * targetsize is used to ensure we stay within specification when reading,
    # * deltainfo is used to pick a good delta chain when writing.
    if deltainfo is not None and targetsize is not None:
        raise error.ProgrammingError(
            'cannot use `targetsize` with a `deltainfo`')
    # first slice by density, then cap each density chunk to targetsize
    densitychunks = _slicechunktodensity(revlog, revs,
                                         deltainfo,
                                         revlog._srdensitythreshold,
                                         revlog._srmingapsize)
    for chunk in densitychunks:
        for subchunk in _slicechunktosize(revlog, chunk, targetsize):
            yield subchunk
367 367
def _slicechunktosize(revlog, revs, targetsize=None):
    """slice revs to match the target size

    This is intended to be used on chunks that density slicing selected but
    that are still too large compared to the read guarantee of revlogs. This
    might happen when the "minimal gap size" interrupted the slicing or when
    chains are built in a way that creates large blocks next to each other.

    >>> revlog = _testrevlog([
    ...  3,  #0 (3)
    ...  5,  #1 (2)
    ...  6,  #2 (1)
    ...  8,  #3 (2)
    ...  8,  #4 (empty)
    ...  11, #5 (3)
    ...  12, #6 (1)
    ...  13, #7 (1)
    ...  14, #8 (1)
    ... ])

    Cases where chunk is already small enough
    >>> list(_slicechunktosize(revlog, [0], 3))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], 3))
    [[6, 7]]
    >>> list(_slicechunktosize(revlog, [0], None))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], None))
    [[6, 7]]

    cases where we need actual slicing
    >>> list(_slicechunktosize(revlog, [0, 1], 3))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 3))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
    [[1, 2], [3]]
    >>> list(_slicechunktosize(revlog, [3, 5], 3))
    [[3], [5]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
    [[3], [5]]
    >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
    [[5], [6, 7, 8]]
    >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
    [[0], [1, 2], [3], [5], [6, 7, 8]]

    Case with too large individual chunk (must return valid chunk)
    >>> list(_slicechunktosize(revlog, [0, 1], 2))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 1))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
    [[3], [5]]
    """
    # an explicit target size must be non-negative
    assert targetsize is None or 0 <= targetsize
    if targetsize is None or _segmentspan(revlog, revs) <= targetsize:
        # no limit, or the whole segment already fits: single chunk
        yield revs
        return

    startrevidx = 0
    startdata = revlog.start(revs[0])
    endrevidx = 0
    iterrevs = enumerate(revs)
    next(iterrevs) # skip first rev.
    for idx, r in iterrevs:
        # span of the current chunk if it were extended through rev ``r``
        span = revlog.end(r) - startdata
        if span <= targetsize:
            # ``r`` still fits: grow the current chunk
            endrevidx = idx
        else:
            # flush the current chunk (trimmed of trailing empty revisions;
            # trimming may leave it empty) and start a new chunk at ``r``.
            # Note: a single revision larger than targetsize still ends up
            # yielded on its own, keeping every chunk valid.
            chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
            if chunk:
                yield chunk
            startrevidx = idx
            startdata = revlog.start(r)
            endrevidx = idx
    # flush the last pending chunk
    yield _trimchunk(revlog, revs, startrevidx)
444 444
def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
                         mingapsize=0):
    """slice revs to reduce the amount of unrelated data to be read from disk.

    ``revs`` is sliced into groups that should be read in one time.
    Assume that revs are sorted.

    ``deltainfo`` is a _deltainfo instance of a revision that we would append
    to the top of the revlog.

    The initial chunk is sliced until the overall density (payload/chunks-span
    ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
    skipped.

    >>> revlog = _testrevlog([
    ...  5,  #00 (5)
    ...  10, #01 (5)
    ...  12, #02 (2)
    ...  12, #03 (empty)
    ...  27, #04 (15)
    ...  31, #05 (4)
    ...  31, #06 (empty)
    ...  42, #07 (11)
    ...  47, #08 (5)
    ...  47, #09 (empty)
    ...  48, #10 (1)
    ...  51, #11 (3)
    ...  74, #12 (23)
    ...  85, #13 (11)
    ...  86, #14 (1)
    ...  91, #15 (5)
    ... ])

    >>> list(_slicechunktodensity(revlog, list(range(16))))
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
    >>> list(_slicechunktodensity(revlog, [0, 15]))
    [[0], [15]]
    >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
    [[0], [11], [15]]
    >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
    [[0], [11, 13, 15]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
    [[1, 2], [5, 8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           mingapsize=20))
    [[1, 2, 3, 5, 8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           targetdensity=0.95))
    [[1, 2], [5], [8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           targetdensity=0.95, mingapsize=12))
    [[1, 2], [5, 8, 10, 11], [14]]
    """
    start = revlog.start
    length = revlog.length

    # a single revision cannot be sliced any further
    if len(revs) <= 1:
        yield revs
        return

    # ``nextrev``/``nextoffset`` describe the not-yet-stored revision that
    # ``deltainfo`` (if any) would append at the top of the revlog
    nextrev = len(revlog)
    nextoffset = revlog.end(nextrev - 1)

    if deltainfo is None:
        deltachainspan = _segmentspan(revlog, revs)
        chainpayload = sum(length(r) for r in revs)
    else:
        deltachainspan = deltainfo.distance
        chainpayload = deltainfo.compresseddeltalen

    # span smaller than the minimal gap size: nothing worth slicing out
    if deltachainspan < mingapsize:
        yield revs
        return

    readdata = deltachainspan

    if deltachainspan:
        density = chainpayload / float(deltachainspan)
    else:
        density = 1.0

    # already dense enough: keep as a single read
    if density >= targetdensity:
        yield revs
        return

    if deltainfo is not None:
        # include the pending revision so its gaps are considered too
        revs = list(revs)
        revs.append(nextrev)

    # Store the gaps in a heap to have them sorted by decreasing size
    gapsheap = []
    heapq.heapify(gapsheap)
    prevend = None
    for i, rev in enumerate(revs):
        if rev < nextrev:
            revstart = start(rev)
            revlen = length(rev)
        else:
            # the pending revision: use its would-be position and size
            revstart = nextoffset
            revlen = deltainfo.deltalen

        # Skip empty revisions to form larger holes
        if revlen == 0:
            continue

        if prevend is not None:
            gapsize = revstart - prevend
            # only consider holes that are large enough
            if gapsize > mingapsize:
                heapq.heappush(gapsheap, (-gapsize, i))

        prevend = revstart + revlen

    # Collect the indices of the largest holes until the density is acceptable
    indicesheap = []
    heapq.heapify(indicesheap)
    while gapsheap and density < targetdensity:
        oppgapsize, gapidx = heapq.heappop(gapsheap)

        heapq.heappush(indicesheap, gapidx)

        # the gap sizes are stored as negatives to be sorted decreasingly
        # by the heap
        readdata -= (-oppgapsize)
        if readdata > 0:
            density = chainpayload / float(readdata)
        else:
            density = 1.0

    # Cut the revs at collected indices (ascending, thanks to the heap)
    previdx = 0
    while indicesheap:
        idx = heapq.heappop(indicesheap)

        chunk = _trimchunk(revlog, revs, previdx, idx)
        if chunk:
            yield chunk

        previdx = idx

    chunk = _trimchunk(revlog, revs, previdx)
    if chunk:
        yield chunk
588 588
@attr.s(slots=True, frozen=True)
class _deltainfo(object):
    """Result of a delta computation (built by _deltacomputer._builddeltainfo)."""
    # byte span from the start of the chain base to the end of this delta
    # once appended at the top of the revlog
    distance = attr.ib()
    # size of the compressed delta (header + data)
    deltalen = attr.ib()
    # (header, data) pair as returned by revlog.compress()
    data = attr.ib()
    # revision this delta is stored against
    base = attr.ib()
    # base revision of the delta chain (revlog.chainbase of ``base``)
    chainbase = attr.ib()
    # length of the delta chain, this delta included
    chainlen = attr.ib()
    # cumulative compressed size of the whole chain, this delta included
    compresseddeltalen = attr.ib()
598 598
class _deltacomputer(object):
    """Helper computing how to store new revisions added to a revlog,
    picking the best delta base among a series of candidates."""

    def __init__(self, revlog):
        # the revlog the computed deltas will be stored in
        self.revlog = revlog

    def _getcandidaterevs(self, p1, p2, cachedelta):
        """
        Provides revisions that present an interest to be diffed against,
        grouped by level of easiness.

        Yields tuples/lists of candidate revision numbers; each group is
        only tried if no acceptable delta came out of the previous one.
        """
        revlog = self.revlog
        gdelta = revlog._generaldelta
        curr = len(revlog)
        prev = curr - 1
        p1r, p2r = revlog.rev(p1), revlog.rev(p2)

        # should we try to build a delta?
        if prev != nullrev and revlog.storedeltachains:
            # revisions already proposed, to avoid testing them twice
            tested = set()
            # This condition is true most of the time when processing
            # changegroup data into a generaldelta repo. The only time it
            # isn't true is if this is the first revision in a delta chain
            # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
            if cachedelta and gdelta and revlog._lazydeltabase:
                # Assume what we received from the server is a good choice
                # build delta will reuse the cache
                yield (cachedelta[0],)
                tested.add(cachedelta[0])

            if gdelta:
                # exclude already lazy tested base if any
                parents = [p for p in (p1r, p2r)
                           if p != nullrev and p not in tested]

                if not revlog._deltabothparents and len(parents) == 2:
                    parents.sort()
                    # To minimize the chance of having to build a fulltext,
                    # pick first whichever parent is closest to us (max rev)
                    yield (parents[1],)
                    # then the other one (min rev) if the first did not fit
                    yield (parents[0],)
                    tested.update(parents)
                elif len(parents) > 0:
                    # Test all parents (1 or 2), and keep the best candidate
                    yield parents
                    tested.update(parents)

            if prev not in tested:
                # other approach failed try against prev to hopefully save us a
                # fulltext.
                yield (prev,)
                tested.add(prev)

    def buildtext(self, revinfo, fh):
        """Builds a fulltext version of a revision

        revinfo: _revisioninfo instance that contains all needed info
        fh:      file handle to either the .i or the .d revlog file,
                 depending on whether it is inlined or not
        """
        btext = revinfo.btext
        if btext[0] is not None:
            return btext[0]

        revlog = self.revlog
        cachedelta = revinfo.cachedelta
        flags = revinfo.flags
        node = revinfo.node

        # btext[0] was None, so cachedelta must be set here (one of the two
        # is required, cf. _revisioninfo)
        baserev = cachedelta[0]
        delta = cachedelta[1]
        # special case deltas which replace entire base; no need to decode
        # base revision. this neatly avoids censored bases, which throw when
        # they're decoded.
        hlen = struct.calcsize(">lll")
        if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
                                                   len(delta) - hlen):
            btext[0] = delta[hlen:]
        else:
            # deltabase is rawtext before changed by flag processors, which is
            # equivalent to non-raw text
            basetext = revlog.revision(baserev, _df=fh, raw=False)
            btext[0] = mdiff.patch(basetext, delta)

        try:
            res = revlog._processflags(btext[0], flags, 'read', raw=True)
            btext[0], validatehash = res
            if validatehash:
                revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
            if flags & REVIDX_ISCENSORED:
                raise RevlogError(_('node %s is not censored') % node)
        except CensoredNodeError:
            # must pass the censored index flag to add censored revisions
            if not flags & REVIDX_ISCENSORED:
                raise
        return btext[0]

    def _builddeltadiff(self, base, revinfo, fh):
        """Build the raw delta text turning revision ``base`` into the
        fulltext described by ``revinfo``."""
        revlog = self.revlog
        t = self.buildtext(revinfo, fh)
        if revlog.iscensored(base):
            # deltas based on a censored revision must replace the
            # full content in one patch, so delta works everywhere
            header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
            delta = header + t
        else:
            ptext = revlog.revision(base, _df=fh, raw=True)
            delta = mdiff.textdiff(ptext, t)

        return delta

    def _builddeltainfo(self, revinfo, base, fh):
        """Build a _deltainfo describing a delta of ``revinfo`` against
        revision ``base``."""
        # can we use the cached delta?
        if revinfo.cachedelta and revinfo.cachedelta[0] == base:
            delta = revinfo.cachedelta[1]
        else:
            delta = self._builddeltadiff(base, revinfo, fh)
        revlog = self.revlog
        header, data = revlog.compress(delta)
        deltalen = len(header) + len(data)
        chainbase = revlog.chainbase(base)
        # the new delta would be appended right after the current last rev
        offset = revlog.end(len(revlog) - 1)
        dist = deltalen + offset - revlog.start(chainbase)
        if revlog._generaldelta:
            deltabase = base
        else:
            deltabase = chainbase
        chainlen, compresseddeltalen = revlog._chaininfo(base)
        chainlen += 1
        compresseddeltalen += deltalen
        return _deltainfo(dist, deltalen, (header, data), deltabase,
                          chainbase, chainlen, compresseddeltalen)

    def finddeltainfo(self, revinfo, fh):
        """Find an acceptable delta against a candidate revision

        revinfo: information about the revision (instance of _revisioninfo)
        fh:      file handle to either the .i or the .d revlog file,
                 depending on whether it is inlined or not

        Returns the first acceptable candidate revision, as ordered by
        _getcandidaterevs
        """
        cachedelta = revinfo.cachedelta
        p1 = revinfo.p1
        p2 = revinfo.p2
        revlog = self.revlog

        # bound locally to avoid repeated attribute lookups in the loop below
        deltalength = self.revlog.length
        deltaparent = self.revlog.deltaparent

        deltainfo = None
        for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
            nominateddeltas = []
            for candidaterev in candidaterevs:
                # skip over empty delta (no need to include them in a chain)
                while candidaterev != nullrev and not deltalength(candidaterev):
                    candidaterev = deltaparent(candidaterev)
                # no need to try a delta against nullid, this will be handled
                # by fulltext later.
                if candidaterev == nullrev:
                    continue
                # no delta for rawtext-changing revs (see "candelta" for why)
                if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
                    continue
                candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
                if revlog._isgooddeltainfo(candidatedelta, revinfo):
                    nominateddeltas.append(candidatedelta)
            if nominateddeltas:
                # keep the smallest acceptable delta of this candidate group
                deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
                break

        return deltainfo
761 771
@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node:       expected hash of the revision
    p1, p2:     parent revs of the revision
    btext:      built text cache consisting of a one-element list
    textlen:    presumably the length of the fulltext; not read in this
                module's visible code — confirm at call sites
    cachedelta: (baserev, uncompressed_delta) or None
    flags:      flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """
    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()
780 790
# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = struct.Struct(">4l20s20s20s")
# pre-bound (un)pack methods of the v0 entry format
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack
792 802
class revlogoldindex(list):
    """Index list for the v0 format that maps rev -1 to the null entry."""

    def __getitem__(self, i):
        # nullrev (-1) is never stored; synthesize its canonical entry
        if i == -1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        return super(revlogoldindex, self).__getitem__(i)
798 808
class revlogoldio(object):
    """Parser/packer for the version 0 revlog index format."""

    def __init__(self):
        # byte size of one v0 index entry
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        """Parse raw v0 index ``data`` into (index, nodemap, chunkcache).

        Entries are converted to the v1 (revlogng) tuple layout.  The
        ``inline`` flag is ignored and the returned chunk cache is always
        None.
        """
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format: pack offset with empty flags,
            # insert a -1 placeholder for the uncompressed length (unknown
            # in v0) and resolve parent nodeids to revision numbers
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        return revlogoldindex(index), nodemap, None

    def packentry(self, entry, node, version, rev):
        """Serialize a v1-layout index ``entry`` back to the v0 format.

        ``node`` is applied to the parent-rev fields to turn revision
        numbers back into nodeids.  Raises RevlogError when the entry
        carries index flags, which v0 cannot represent.
        """
        if gettype(entry[0]):
            raise RevlogError(_('index entry flags need revlog version 1'))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return indexformatv0_pack(*e2)
828 838
# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = struct.Struct(">Qiiiiii20s12x")
# pre-bound pack method of the ng entry format
indexformatng_pack = indexformatng.pack
# 4-byte big-endian revlog header (version and feature flags)
versionformat = struct.Struct(">I")
versionformat_pack = versionformat.pack
versionformat_unpack = versionformat.unpack

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff
848 858
class revlogio(object):
    """Parser/packer for the revlogng (v1/v2) index format."""

    def __init__(self):
        # byte size of one ng index entry
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = indexformatng_pack(*entry)
        if rev == 0:
            # the first entry doubles as the revlog header: its leading
            # 4 bytes are overwritten with the version/flags word
            p = versionformat_pack(version) + p[4:]
        return p
863 873
864 874 class revlog(object):
865 875 """
866 876 the underlying revision storage object
867 877
868 878 A revlog consists of two parts, an index and the revision data.
869 879
870 880 The index is a file with a fixed record size containing
871 881 information on each revision, including its nodeid (hash), the
872 882 nodeids of its parents, the position and offset of its data within
873 883 the data file, and the revision it's based on. Finally, each entry
874 884 contains a linkrev entry that can serve as a pointer to external
875 885 data.
876 886
877 887 The revision data itself is a linear collection of data chunks.
878 888 Each chunk represents a revision and is usually represented as a
879 889 delta against the previous chunk. To bound lookup time, runs of
880 890 deltas are limited to about 2 times the length of the original
881 891 version data. This makes retrieval of a version proportional to
882 892 its size, or O(1) relative to the number of revisions.
883 893
884 894 Both pieces of the revlog are written to in an append-only
885 895 fashion, which means we never need to rewrite a file to insert or
886 896 remove data, and can use some simple techniques to avoid the need
887 897 for locking while reading.
888 898
889 899 If checkambig, indexfile is opened with checkambig=True at
890 900 writing, to avoid file stat ambiguity.
891 901
892 902 If mmaplargeindex is True, and an mmapindexthreshold is set, the
893 903 index will be mmapped rather than read if it is larger than the
894 904 configured threshold.
895 905
896 906 If censorable is True, the revlog can have censored revisions.
897 907 """
    def __init__(self, opener, indexfile, datafile=None, checkambig=False,
                 mmaplargeindex=False, censorable=False):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        indexfile is the path of the ".i" file; datafile defaults to the
        matching ".d" path. If checkambig, the index is opened with
        checkambig=True at writing to avoid file stat ambiguity. If
        mmaplargeindex, indexes larger than the configured threshold are
        mmapped instead of read. If censorable, this revlog may contain
        censored revisions.
        """
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + ".d")
        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._cache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        self._compengine = 'zlib'
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Defaults above may be overridden by options attached to the opener
        # (typically populated from the repository configuration).
        mmapindexthreshold = None
        v = REVLOG_DEFAULT_VERSION
        opts = getattr(opener, 'options', None)
        if opts is not None:
            if 'revlogv2' in opts:
                # version 2 revlogs always use generaldelta.
                v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
            elif 'revlogv1' in opts:
                if 'generaldelta' in opts:
                    v |= FLAG_GENERALDELTA
            else:
                # neither v1 nor v2 requested: fall back to version 0
                v = 0
            if 'chunkcachesize' in opts:
                self._chunkcachesize = opts['chunkcachesize']
            if 'maxchainlen' in opts:
                self._maxchainlen = opts['maxchainlen']
            if 'deltabothparents' in opts:
                self._deltabothparents = opts['deltabothparents']
            self._lazydeltabase = bool(opts.get('lazydeltabase', False))
            if 'compengine' in opts:
                self._compengine = opts['compengine']
            if 'maxdeltachainspan' in opts:
                self._maxdeltachainspan = opts['maxdeltachainspan']
            if mmaplargeindex and 'mmapindexthreshold' in opts:
                mmapindexthreshold = opts['mmapindexthreshold']
            self._sparserevlog = bool(opts.get('sparse-revlog', False))
            withsparseread = bool(opts.get('with-sparse-read', False))
            # sparse-revlog forces sparse-read
            self._withsparseread = self._sparserevlog or withsparseread
            if 'sparse-read-density-threshold' in opts:
                self._srdensitythreshold = opts['sparse-read-density-threshold']
            if 'sparse-read-min-gap-size' in opts:
                self._srmingapsize = opts['sparse-read-min-gap-size']

        # The chunk cache is indexed by masking offsets, so its size must be
        # a positive power of two.
        if self._chunkcachesize <= 0:
            raise RevlogError(_('revlog chunk cache size %r is not greater '
                                'than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise RevlogError(_('revlog chunk cache size %r is not a power '
                                'of 2') % self._chunkcachesize)

        # Read the existing index (if any); a missing file simply means an
        # empty revlog.  The on-disk header overrides the configured version.
        indexdata = ''
        self._initempty = True
        try:
            with self._indexfp() as f:
                if (mmapindexthreshold is not None and
                    self.opener.fstat(f).st_size >= mmapindexthreshold):
                    indexdata = util.buffer(util.mmapread(f))
                else:
                    indexdata = f.read()
            if len(indexdata) > 0:
                v = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & FLAG_INLINE_DATA
        self._generaldelta = v & FLAG_GENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        # Validate that the header only carries flags known for its format.
        if fmt == REVLOGV0:
            if flags:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        else:
            raise RevlogError(_('unknown version (%d) in revlog %s') %
                              (fmt, self.indexfile))

        self.storedeltachains = True

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}
1035 1045
1036 1046 @util.propertycache
1037 1047 def _compressor(self):
1038 1048 return util.compengines[self._compengine].revlogcompressor()
1039 1049
1040 1050 def _indexfp(self, mode='r'):
1041 1051 """file object for the revlog's index file"""
1042 1052 args = {r'mode': mode}
1043 1053 if mode != 'r':
1044 1054 args[r'checkambig'] = self._checkambig
1045 1055 if mode == 'w':
1046 1056 args[r'atomictemp'] = True
1047 1057 return self.opener(self.indexfile, **args)
1048 1058
1049 1059 def _datafp(self, mode='r'):
1050 1060 """file object for the revlog's data file"""
1051 1061 return self.opener(self.datafile, mode=mode)
1052 1062
1053 1063 @contextlib.contextmanager
1054 1064 def _datareadfp(self, existingfp=None):
1055 1065 """file object suitable to read data"""
1056 1066 if existingfp is not None:
1057 1067 yield existingfp
1058 1068 else:
1059 1069 if self._inline:
1060 1070 func = self._indexfp
1061 1071 else:
1062 1072 func = self._datafp
1063 1073 with func() as fp:
1064 1074 yield fp
1065 1075
1066 1076 def tip(self):
1067 1077 return self.node(len(self.index) - 1)
1068 1078 def __contains__(self, rev):
1069 1079 return 0 <= rev < len(self)
1070 1080 def __len__(self):
1071 1081 return len(self.index)
1072 1082 def __iter__(self):
1073 1083 return iter(pycompat.xrange(len(self)))
1074 1084 def revs(self, start=0, stop=None):
1075 1085 """iterate over all rev in this revlog (from start to stop)"""
1076 1086 step = 1
1077 1087 length = len(self)
1078 1088 if stop is not None:
1079 1089 if start > stop:
1080 1090 step = -1
1081 1091 stop += step
1082 1092 if stop > length:
1083 1093 stop = length
1084 1094 else:
1085 1095 stop = length
1086 1096 return pycompat.xrange(start, stop, step)
1087 1097
1088 1098 @util.propertycache
1089 1099 def nodemap(self):
1090 1100 self.rev(self.node(0))
1091 1101 return self._nodecache
1092 1102
1093 1103 def hasnode(self, node):
1094 1104 try:
1095 1105 self.rev(node)
1096 1106 return True
1097 1107 except KeyError:
1098 1108 return False
1099 1109
1100 1110 def candelta(self, baserev, rev):
1101 1111 """whether two revisions (baserev, rev) can be delta-ed or not"""
1102 1112 # Disable delta if either rev requires a content-changing flag
1103 1113 # processor (ex. LFS). This is because such flag processor can alter
1104 1114 # the rawtext content that the delta will be based on, and two clients
1105 1115 # could have a same revlog node with different flags (i.e. different
1106 1116 # rawtext contents) and the delta could be incompatible.
1107 1117 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
1108 1118 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
1109 1119 return False
1110 1120 return True
1111 1121
1112 1122 def clearcaches(self):
1113 1123 self._cache = None
1114 1124 self._chainbasecache.clear()
1115 1125 self._chunkcache = (0, '')
1116 1126 self._pcache = {}
1117 1127
1118 1128 try:
1119 1129 self._nodecache.clearcaches()
1120 1130 except AttributeError:
1121 1131 self._nodecache = {nullid: nullrev}
1122 1132 self._nodepos = None
1123 1133
    def rev(self, node):
        """Return the integer revision number holding ``node``.

        Raises LookupError if the node is unknown, or WdirUnsupported if
        the working-directory pseudo-node is passed in.
        """
        try:
            return self._nodecache[node]
        except TypeError:
            # unhashable input (e.g. a list): let the caller see the error
            raise
        except RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                # no scan in progress yet: start from the tip
                p = len(i) - 1
            else:
                assert p < len(i)
            # scan backwards, memoizing every node seen along the way;
            # _nodepos remembers where to resume the next cache miss
            for r in pycompat.xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))
1152 1162
1153 1163 # Accessors for index entries.
1154 1164
1155 1165 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
1156 1166 # are flags.
1157 1167 def start(self, rev):
1158 1168 return int(self.index[rev][0] >> 16)
1159 1169
1160 1170 def flags(self, rev):
1161 1171 return self.index[rev][0] & 0xFFFF
1162 1172
1163 1173 def length(self, rev):
1164 1174 return self.index[rev][1]
1165 1175
1166 1176 def rawsize(self, rev):
1167 1177 """return the length of the uncompressed text for a given revision"""
1168 1178 l = self.index[rev][2]
1169 1179 if l >= 0:
1170 1180 return l
1171 1181
1172 1182 t = self.revision(rev, raw=True)
1173 1183 return len(t)
1174 1184
1175 1185 def size(self, rev):
1176 1186 """length of non-raw text (processed by a "read" flag processor)"""
1177 1187 # fast path: if no "read" flag processor could change the content,
1178 1188 # size is rawsize. note: ELLIPSIS is known to not change the content.
1179 1189 flags = self.flags(rev)
1180 1190 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
1181 1191 return self.rawsize(rev)
1182 1192
1183 1193 return len(self.revision(rev, raw=False))
1184 1194
1185 1195 def chainbase(self, rev):
1186 1196 base = self._chainbasecache.get(rev)
1187 1197 if base is not None:
1188 1198 return base
1189 1199
1190 1200 index = self.index
1191 1201 iterrev = rev
1192 1202 base = index[iterrev][3]
1193 1203 while base != iterrev:
1194 1204 iterrev = base
1195 1205 base = index[iterrev][3]
1196 1206
1197 1207 self._chainbasecache[rev] = base
1198 1208 return base
1199 1209
1200 1210 def linkrev(self, rev):
1201 1211 return self.index[rev][4]
1202 1212
1203 1213 def parentrevs(self, rev):
1204 1214 try:
1205 1215 entry = self.index[rev]
1206 1216 except IndexError:
1207 1217 if rev == wdirrev:
1208 1218 raise error.WdirUnsupported
1209 1219 raise
1210 1220
1211 1221 return entry[5], entry[6]
1212 1222
1213 1223 def node(self, rev):
1214 1224 try:
1215 1225 return self.index[rev][7]
1216 1226 except IndexError:
1217 1227 if rev == wdirrev:
1218 1228 raise error.WdirUnsupported
1219 1229 raise
1220 1230
1221 1231 # Derived from index values.
1222 1232
1223 1233 def end(self, rev):
1224 1234 return self.start(rev) + self.length(rev)
1225 1235
1226 1236 def parents(self, node):
1227 1237 i = self.index
1228 1238 d = i[self.rev(node)]
1229 1239 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
1230 1240
1231 1241 def chainlen(self, rev):
1232 1242 return self._chaininfo(rev)[0]
1233 1243
    def _chaininfo(self, rev):
        """Return (chain length, total compressed delta length) for ``rev``.

        Results are memoized in _chaininfocache; a partial walk that hits a
        cached ancestor merges the cached totals instead of walking further.
        """
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        # e[3] is the delta base; a revision based on itself is a full
        # snapshot and terminates the chain.
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                # pre-generaldelta revlogs always delta against rev - 1
                iterrev -= 1
            if iterrev in chaininfocache:
                # merge the already-computed totals for the ancestor chain
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
1264 1274
    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        # e[3] is the delta base; a revision based on itself is a full
        # snapshot and ends the chain.
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                # generaldelta: the base may be any earlier revision
                iterrev = e[3]
            else:
                # pre-generaldelta: deltas always apply against rev - 1
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            # reached a full snapshot; it belongs to the chain
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped
1305 1315
1306 1316 def ancestors(self, revs, stoprev=0, inclusive=False):
1307 1317 """Generate the ancestors of 'revs' in reverse topological order.
1308 1318 Does not generate revs lower than stoprev.
1309 1319
1310 1320 See the documentation for ancestor.lazyancestors for more details."""
1311 1321
1312 1322 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
1313 1323 inclusive=inclusive)
1314 1324
1315 1325 def descendants(self, revs):
1316 1326 """Generate the descendants of 'revs' in revision order.
1317 1327
1318 1328 Yield a sequence of revision numbers starting with a child of
1319 1329 some rev in revs, i.e., each revision is *not* considered a
1320 1330 descendant of itself. Results are ordered by revision number (a
1321 1331 topological sort)."""
1322 1332 first = min(revs)
1323 1333 if first == nullrev:
1324 1334 for i in self:
1325 1335 yield i
1326 1336 return
1327 1337
1328 1338 seen = set(revs)
1329 1339 for i in self.revs(start=first + 1):
1330 1340 for x in self.parentrevs(i):
1331 1341 if x != nullrev and x in seen:
1332 1342 seen.add(i)
1333 1343 yield i
1334 1344 break
1335 1345
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

        ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        # lazyset wraps the lazy ancestor generator so membership tests and
        # iteration work without forcing the full ancestor walk up front;
        # explicitly added values (the common revs themselves, nullrev)
        # live in a separate eager set.
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if not r in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        # breadth-first walk from the heads, pruning at anything in 'has'
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]
1399 1409
1400 1410 def incrementalmissingrevs(self, common=None):
1401 1411 """Return an object that can be used to incrementally compute the
1402 1412 revision numbers of the ancestors of arbitrary sets that are not
1403 1413 ancestors of common. This is an ancestor.incrementalmissingancestors
1404 1414 object.
1405 1415
1406 1416 'common' is a list of revision numbers. If common is not supplied, uses
1407 1417 nullrev.
1408 1418 """
1409 1419 if common is None:
1410 1420 common = [nullrev]
1411 1421
1412 1422 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1413 1423
1414 1424 def findmissingrevs(self, common=None, heads=None):
1415 1425 """Return the revision numbers of the ancestors of heads that
1416 1426 are not ancestors of common.
1417 1427
1418 1428 More specifically, return a list of revision numbers corresponding to
1419 1429 nodes N such that every N satisfies the following constraints:
1420 1430
1421 1431 1. N is an ancestor of some node in 'heads'
1422 1432 2. N is not an ancestor of any node in 'common'
1423 1433
1424 1434 The list is sorted by revision number, meaning it is
1425 1435 topologically sorted.
1426 1436
1427 1437 'heads' and 'common' are both lists of revision numbers. If heads is
1428 1438 not supplied, uses all of the revlog's heads. If common is not
1429 1439 supplied, uses nullid."""
1430 1440 if common is None:
1431 1441 common = [nullrev]
1432 1442 if heads is None:
1433 1443 heads = self.headrevs()
1434 1444
1435 1445 inc = self.incrementalmissingrevs(common=common)
1436 1446 return inc.missingancestors(heads)
1437 1447
1438 1448 def findmissing(self, common=None, heads=None):
1439 1449 """Return the ancestors of heads that are not ancestors of common.
1440 1450
1441 1451 More specifically, return a list of nodes N such that every N
1442 1452 satisfies the following constraints:
1443 1453
1444 1454 1. N is an ancestor of some node in 'heads'
1445 1455 2. N is not an ancestor of any node in 'common'
1446 1456
1447 1457 The list is sorted by revision number, meaning it is
1448 1458 topologically sorted.
1449 1459
1450 1460 'heads' and 'common' are both lists of node IDs. If heads is
1451 1461 not supplied, uses all of the revlog's heads. If common is not
1452 1462 supplied, uses nullid."""
1453 1463 if common is None:
1454 1464 common = [nullid]
1455 1465 if heads is None:
1456 1466 heads = self.heads()
1457 1467
1458 1468 common = [self.rev(n) for n in common]
1459 1469 heads = [self.rev(n) for n in heads]
1460 1470
1461 1471 inc = self.incrementalmissingrevs(common=common)
1462 1472 return [self.node(r) for r in inc.missingancestors(heads)]
1463 1473
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants.  (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
1618 1628
1619 1629 def headrevs(self):
1620 1630 try:
1621 1631 return self.index.headrevs()
1622 1632 except AttributeError:
1623 1633 return self._headrevs()
1624 1634
1625 1635 def computephases(self, roots):
1626 1636 return self.index.computephasesmapsets(roots)
1627 1637
1628 1638 def _headrevs(self):
1629 1639 count = len(self)
1630 1640 if not count:
1631 1641 return [nullrev]
1632 1642 # we won't iter over filtered rev so nobody is a head at start
1633 1643 ishead = [0] * (count + 1)
1634 1644 index = self.index
1635 1645 for r in self:
1636 1646 ishead[r] = 1 # I may be an head
1637 1647 e = index[r]
1638 1648 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1639 1649 return [r for r, val in enumerate(ishead) if val]
1640 1650
1641 1651 def heads(self, start=None, stop=None):
1642 1652 """return the list of all nodes that have no children
1643 1653
1644 1654 if start is specified, only heads that are descendants of
1645 1655 start will be returned
1646 1656 if stop is specified, it will consider all the revs from stop
1647 1657 as if they had no children
1648 1658 """
1649 1659 if start is None and stop is None:
1650 1660 if not len(self):
1651 1661 return [nullid]
1652 1662 return [self.node(r) for r in self.headrevs()]
1653 1663
1654 1664 if start is None:
1655 1665 start = nullid
1656 1666 if stop is None:
1657 1667 stop = []
1658 1668 stoprevs = set([self.rev(n) for n in stop])
1659 1669 startrev = self.rev(start)
1660 1670 reachable = {startrev}
1661 1671 heads = {startrev}
1662 1672
1663 1673 parentrevs = self.parentrevs
1664 1674 for r in self.revs(start=startrev + 1):
1665 1675 for p in parentrevs(r):
1666 1676 if p in reachable:
1667 1677 if r not in stoprevs:
1668 1678 reachable.add(r)
1669 1679 heads.add(r)
1670 1680 if p in heads and p not in stoprevs:
1671 1681 heads.remove(p)
1672 1682
1673 1683 return [self.node(r) for r in heads]
1674 1684
1675 1685 def children(self, node):
1676 1686 """find the children of a given node"""
1677 1687 c = []
1678 1688 p = self.rev(node)
1679 1689 for r in self.revs(start=p + 1):
1680 1690 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1681 1691 if prevs:
1682 1692 for pr in prevs:
1683 1693 if pr == p:
1684 1694 c.append(self.node(r))
1685 1695 elif p == nullrev:
1686 1696 c.append(self.node(r))
1687 1697 return c
1688 1698
1689 1699 def commonancestorsheads(self, a, b):
1690 1700 """calculate all the heads of the common ancestors of nodes a and b"""
1691 1701 a, b = self.rev(a), self.rev(b)
1692 1702 ancs = self._commonancestorsheads(a, b)
1693 1703 return pycompat.maplist(self.node, ancs)
1694 1704
1695 1705 def _commonancestorsheads(self, *revs):
1696 1706 """calculate all the heads of the common ancestors of revs"""
1697 1707 try:
1698 1708 ancs = self.index.commonancestorsheads(*revs)
1699 1709 except (AttributeError, OverflowError): # C implementation failed
1700 1710 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1701 1711 return ancs
1702 1712
1703 1713 def isancestor(self, a, b):
1704 1714 """return True if node a is an ancestor of node b
1705 1715
1706 1716 A revision is considered an ancestor of itself."""
1707 1717 a, b = self.rev(a), self.rev(b)
1708 1718 return self.isancestorrev(a, b)
1709 1719
1710 1720 def isancestorrev(self, a, b):
1711 1721 """return True if revision a is an ancestor of revision b
1712 1722
1713 1723 A revision is considered an ancestor of itself.
1714 1724
1715 1725 The implementation of this is trivial but the use of
1716 1726 commonancestorsheads is not."""
1717 1727 if a == nullrev:
1718 1728 return True
1719 1729 elif a == b:
1720 1730 return True
1721 1731 elif a > b:
1722 1732 return False
1723 1733 return a in self._commonancestorsheads(a, b)
1724 1734
1725 1735 def ancestor(self, a, b):
1726 1736 """calculate the "best" common ancestor of nodes a and b"""
1727 1737
1728 1738 a, b = self.rev(a), self.rev(b)
1729 1739 try:
1730 1740 ancs = self.index.ancestors(a, b)
1731 1741 except (AttributeError, OverflowError):
1732 1742 ancs = ancestor.ancestors(self.parentrevs, a, b)
1733 1743 if ancs:
1734 1744 # choose a consistent winner when there's a tie
1735 1745 return min(map(self.node, ancs))
1736 1746 return nullid
1737 1747
    def _match(self, id):
        """Resolve ``id`` to a node, trying several interpretations in turn.

        ``id`` may be an integer revision, a 20-byte binary node, a decimal
        revision string (negative values count from the end), or a 40-char
        hex node.  Returns the node, or None if nothing matched.
        """
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            # reject strings that are not canonical decimal (e.g. "07")
            if "%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                # not valid hex, or not in the index: no match
                pass
1771 1781
    def _partialmatch(self, id):
        """Resolve a hex nodeid prefix to a full binary node.

        Returns the binary node on a unique match and None when nothing
        matches. Raises AmbiguousPrefixLookupError when several nodes match
        and error.WdirUnsupported when the prefix can only designate the
        working directory pseudo-node.
        """
        # we don't care wdirfilenodeids as they should be always full hash
        maybewdir = wdirhex.startswith(id)
        try:
            # fast path: the C radix tree in parsers.c
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise AmbiguousPrefixLookupError(id, self.indexfile,
                                                 _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        # cache of previously resolved prefixes
        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                # linear scan of the whole index, filtering hidden nodes
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise AmbiguousPrefixLookupError(id, self.indexfile,
                                                     _('ambiguous identifier'))
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass
1819 1829
1820 1830 def lookup(self, id):
1821 1831 """locate a node based on:
1822 1832 - revision number or str(revision number)
1823 1833 - nodeid or subset of hex nodeid
1824 1834 """
1825 1835 n = self._match(id)
1826 1836 if n is not None:
1827 1837 return n
1828 1838 n = self._partialmatch(id)
1829 1839 if n:
1830 1840 return n
1831 1841
1832 1842 raise LookupError(id, self.indexfile, _('no match found'))
1833 1843
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""
        def isvalid(prefix):
            # a prefix is valid when it resolves, without ambiguity, to a
            # known node (or uniquely to the wdir pseudo-node)
            try:
                node = self._partialmatch(prefix)
            except error.RevlogError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if node is None:
                raise LookupError(node, self.indexfile, _('no node'))
            return True

        def maybewdir(prefix):
            # all-'f' prefixes could also designate the working directory
            return all(c == 'f' for c in prefix)

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        # the C index's shortest() is only usable when no revisions are
        # hidden by filtering
        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except RevlogError:
                if node != wdirid:
                    raise LookupError(node, self.indexfile, _('no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        # slow path: probe increasingly long prefixes until one is unique
        if node == wdirid:
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)
1881 1891
1882 1892 def cmp(self, node, text):
1883 1893 """compare text with a given file revision
1884 1894
1885 1895 returns True if text is different than what is stored.
1886 1896 """
1887 1897 p1, p2 = self.parents(node)
1888 1898 return hash(text, p1, p2) != node
1889 1899
1890 1900 def _cachesegment(self, offset, data):
1891 1901 """Add a segment to the revlog cache.
1892 1902
1893 1903 Accepts an absolute offset and the data that is at that location.
1894 1904 """
1895 1905 o, d = self._chunkcache
1896 1906 # try to add to existing cache
1897 1907 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1898 1908 self._chunkcache = o, d + data
1899 1909 else:
1900 1910 self._chunkcache = offset, data
1901 1911
    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        # NOTE(review): the bit masks below round offsets to multiples of
        # the cache size and are only correct when _chunkcachesize is a
        # power of two — presumably validated where the option is read.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)
            self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            # hand back only the requested window of the larger cached read
            return util.buffer(d, offset - realoffset, length)
        return d
1927 1937
1928 1938 def _getsegment(self, offset, length, df=None):
1929 1939 """Obtain a segment of raw data from the revlog.
1930 1940
1931 1941 Accepts an absolute offset, length of bytes to obtain, and an
1932 1942 optional file handle to the already-opened revlog. If the file
1933 1943 handle is used, it's original seek position will not be preserved.
1934 1944
1935 1945 Requests for data may be returned from a cache.
1936 1946
1937 1947 Returns a str or a buffer instance of raw byte data.
1938 1948 """
1939 1949 o, d = self._chunkcache
1940 1950 l = len(d)
1941 1951
1942 1952 # is it in the cache?
1943 1953 cachestart = offset - o
1944 1954 cacheend = cachestart + length
1945 1955 if cachestart >= 0 and cacheend <= l:
1946 1956 if cachestart == 0 and cacheend == l:
1947 1957 return d # avoid a copy
1948 1958 return util.buffer(d, cachestart, cacheend - cachestart)
1949 1959
1950 1960 return self._readsegment(offset, length, df=df)
1951 1961
    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            # inline revlogs interleave an index entry before each chunk, so
            # account for the index entries stored up to each revision
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)
1985 1995
1986 1996 def _chunk(self, rev, df=None):
1987 1997 """Obtain a single decompressed chunk for a revision.
1988 1998
1989 1999 Accepts an integer revision and an optional already-open file handle
1990 2000 to be used for reading. If used, the seek position of the file will not
1991 2001 be preserved.
1992 2002
1993 2003 Returns a str holding uncompressed data for the requested revision.
1994 2004 """
1995 2005 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1996 2006
    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        # hoist attribute lookups out of the hot loop below
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        # with sparse-read, split the revisions into groups whose on-disk
        # spans are dense enough to be read in one go
        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = _slicechunk(self, revs, targetsize=targetsize)

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                # carve each revision's chunk out of the single large read
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l
2049 2059
    def _chunkclear(self):
        """Clear the raw chunk cache."""
        # reset to an empty cache anchored at offset 0
        self._chunkcache = (0, '')
2053 2063
2054 2064 def deltaparent(self, rev):
2055 2065 """return deltaparent of the given revision"""
2056 2066 base = self.index[rev][3]
2057 2067 if base == rev:
2058 2068 return nullrev
2059 2069 elif self._generaldelta:
2060 2070 return base
2061 2071 else:
2062 2072 return rev - 1
2063 2073
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        # fast path: the delta stored on disk for rev2 is already against
        # rev1, so it can be returned without recomputation
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        # otherwise compute a fresh binary delta from the raw texts
        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))
2075 2085
    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        flags = None
        rawtext = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                # _cache only stores rawtext
                if raw:
                    return self._cache[2]
                # duplicated, but good for perf
                if rev is None:
                    rev = self.rev(node)
                if flags is None:
                    flags = self.flags(rev)
                # no extra flags set, no flag processor runs, text = rawtext
                if flags == REVIDX_DEFAULT_FLAGS:
                    return self._cache[2]
                # rawtext is reusable. need to run flag processor
                rawtext = self._cache[2]

            # even a cache miss for a different node gives a delta-chain
            # stop point: revisions at or before cachedrev need not be read
            cachedrev = self._cache[1]

        # look up what we need to read
        if rawtext is None:
            if rev is None:
                rev = self.rev(node)

            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
            if stopped:
                # chain stopped at the cached revision: start from its rawtext
                rawtext = self._cache[2]

            # drop cache to save memory
            self._cache = None

            # cap how much sparse-read is allowed to fetch for this text
            targetsize = None
            rawsize = self.index[rev][2]
            if 0 <= rawsize:
                targetsize = 4 * rawsize

            bins = self._chunks(chain, df=_df, targetsize=targetsize)
            if rawtext is None:
                # no cached base: the first chunk is the full-text base
                rawtext = bytes(bins[0])
                bins = bins[1:]

            # apply the delta chain on top of the base text
            rawtext = mdiff.patches(rawtext, bins)
            self._cache = (node, rev, rawtext)

        if flags is None:
            if rev is None:
                rev = self.rev(node)
            flags = self.flags(rev)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text
2150 2160
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        # delegates to the module-level hash() helper (not builtins.hash) —
        # presumably the revision-hash function defined earlier in this file
        return hash(text, p1, p2)
2158 2168
    def _processflags(self, text, flags, operation, raw=False):
        """Inspect revision data flags and applies transforms defined by
        registered flag processors.

        ``text`` - the revision data to process
        ``flags`` - the revision flags
        ``operation`` - the operation being performed (read or write)
        ``raw`` - an optional argument describing if the raw transform should be
        applied.

        This method processes the flags in the order (or reverse order if
        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
        flag processors registered for present flags. The order of flags defined
        in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.

        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
        processed text and ``validatehash`` is a bool indicating whether the
        returned text should be checked for hash integrity.

        Note: If the ``raw`` argument is set, it has precedence over the
        operation and will only update the value of ``validatehash``.
        """
        # fast path: no flag processors will run
        if flags == 0:
            return text, True
        if not operation in ('read', 'write'):
            raise ProgrammingError(_("invalid '%s' operation ") % (operation))
        # Check all flags are known.
        if flags & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_("incompatible revision flag '%#x'") %
                              (flags & ~REVIDX_KNOWN_FLAGS))
        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in _flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise RevlogError(message)

                # a None processor means the flag is known but needs no
                # transform; text passes through unchanged
                processor = _flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                # one processor vetoing hash validation disables it overall
                validatehash = validatehash and vhash

        return text, validatehash
2220 2230
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # prefer the revision number in the message, falling back to
                # a short hex node
                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise RevlogError(_("integrity check failed on %s:%s")
                    % (self.indexfile, pycompat.bytestr(revornode)))
        except RevlogError:
            # a hash mismatch on a censored revision is expected: surface it
            # as a dedicated error instead of a generic integrity failure
            if self._censorable and _censoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise
2240 2250
    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        if (not self._inline or
            (self.start(tiprev) + self.length(tiprev)) < _maxinline):
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(tiprev)

        # register the new data file with the transaction so a rollback
        # knows where this revlog's data started
        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        # write out all revision data to the standalone data file
        with self._datafp('w') as df:
            for r in self:
                df.write(self._getsegmentforrevs(r, r)[1])

        # rewrite the index without the inline flag and without the
        # interleaved data chunks
        with self._indexfp('w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

        # the temp file replace the real index when we exit the context
        # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        # the cached segments were read from the inline layout: invalidate
        self._chunkclear()
2289 2299
    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use different hashing method (and override checkhash() in such case)
        flags - the known flags to set on the revision
        deltacomputer - an optional _deltacomputer instance shared between
            multiple calls
        """
        if link == nullrev:
            raise RevlogError(_("attempted to add linkrev -1 to %s")
                              % self.indexfile)

        if flags:
            # compute the node on the pre-transform text: flag processors may
            # rewrite the stored data, but the node hashes the logical text
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(rawtext)))

        node = node or self.hash(rawtext, p1, p2)
        # duplicate revisions (e.g. the same change on two branches) are
        # stored only once
        if node in self.nodemap:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
                                   flags, cachedelta=cachedelta,
                                   deltacomputer=deltacomputer)
2335 2345
2336 2346 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
2337 2347 cachedelta=None, deltacomputer=None):
2338 2348 """add a raw revision with known flags, node and parents
2339 2349 useful when reusing a revision not stored in this revlog (ex: received
2340 2350 over wire, or read from an external bundle).
2341 2351 """
2342 2352 dfh = None
2343 2353 if not self._inline:
2344 2354 dfh = self._datafp("a+")
2345 2355 ifh = self._indexfp("a+")
2346 2356 try:
2347 2357 return self._addrevision(node, rawtext, transaction, link, p1, p2,
2348 2358 flags, cachedelta, ifh, dfh,
2349 2359 deltacomputer=deltacomputer)
2350 2360 finally:
2351 2361 if dfh:
2352 2362 dfh.close()
2353 2363 ifh.close()
2354 2364
2355 2365 def compress(self, data):
2356 2366 """Generate a possibly-compressed representation of data."""
2357 2367 if not data:
2358 2368 return '', data
2359 2369
2360 2370 compressed = self._compressor.compress(data)
2361 2371
2362 2372 if compressed:
2363 2373 # The revlog compressor added the header in the returned data.
2364 2374 return '', compressed
2365 2375
2366 2376 if data[0:1] == '\0':
2367 2377 return '', data
2368 2378 return 'u', data
2369 2379
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely by at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        # 'x' is the first byte of a zlib stream
        if t == 'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise RevlogError(_('revlog decompress error: %s') %
                                  stringutil.forcebytestr(e))
        # '\0' is more common than 'u' so it goes first.
        elif t == '\0':
            return data
        elif t == 'u':
            # 'u'-tagged chunks are stored uncompressed; strip the tag byte
            return util.buffer(data, 1)

        # any other header byte designates a pluggable compression engine;
        # resolved engines are memoized in self._decompressors
        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor()
                self._decompressors[t] = compressor
            except KeyError:
                raise RevlogError(_('unknown compression type %r') % t)

        return compressor.decompress(data)
2425 2435
    def _isgooddeltainfo(self, deltainfo, revinfo):
        """Returns True if the given delta is good. Good means that it is within
        the disk span, disk size, and chain length bounds that we know to be
        performant."""
        if deltainfo is None:
            return False

        # - 'deltainfo.distance' is the distance from the base revision --
        #   bounding it limits the amount of I/O we need to do.
        # - 'deltainfo.compresseddeltalen' is the sum of the total size of
        #   deltas we need to apply -- bounding it limits the amount of CPU
        #   we consume.

        if self._sparserevlog:
            # As sparse-read will be used, we can consider that the distance,
            # instead of being the span of the whole chunk,
            # is the span of the largest read chunk
            base = deltainfo.base

            if base != nullrev:
                deltachain = self._deltachain(base)[0]
            else:
                deltachain = []

            chunks = _slicechunk(self, deltachain, deltainfo)
            distance = max(map(lambda revs:_segmentspan(self, revs), chunks))
        else:
            distance = deltainfo.distance

        textlen = revinfo.textlen
        defaultmax = textlen * 4
        maxdist = self._maxdeltachainspan
        if not maxdist:
            maxdist = distance # ensure the conditional pass
        maxdist = max(maxdist, defaultmax)
        if self._sparserevlog and maxdist < self._srmingapsize:
            # In multiple place, we are ignoring irrelevant data range below a
            # certain size. Be also apply this tradeoff here and relax span
            # constraint for small enought content.
            maxdist = self._srmingapsize
        # reject deltas that read too far, are larger than the full text, or
        # whose cumulative chain cost/length exceeds configured limits
        if (distance > maxdist or deltainfo.deltalen > textlen or
            deltainfo.compresseddeltalen > textlen * 2 or
            (self._maxchainlen and deltainfo.chainlen > self._maxchainlen)):
            return False

        return True
2472 2482
2473 2483 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
2474 2484 cachedelta, ifh, dfh, alwayscache=False,
2475 2485 deltacomputer=None):
2476 2486 """internal function to add revisions to the log
2477 2487
2478 2488 see addrevision for argument descriptions.
2479 2489
2480 2490 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2481 2491
2482 2492 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2483 2493 be used.
2484 2494
2485 2495 invariants:
2486 2496 - rawtext is optional (can be None); if not set, cachedelta must be set.
2487 2497 if both are set, they must correspond to each other.
2488 2498 """
2489 2499 if node == nullid:
2490 2500 raise RevlogError(_("%s: attempt to add null revision") %
2491 2501 (self.indexfile))
2492 2502 if node == wdirid or node in wdirfilenodeids:
2493 2503 raise RevlogError(_("%s: attempt to add wdir revision") %
2494 2504 (self.indexfile))
2495 2505
2496 2506 if self._inline:
2497 2507 fh = ifh
2498 2508 else:
2499 2509 fh = dfh
2500 2510
2501 2511 btext = [rawtext]
2502 2512
2503 2513 curr = len(self)
2504 2514 prev = curr - 1
2505 2515 offset = self.end(prev)
2506 2516 p1r, p2r = self.rev(p1), self.rev(p2)
2507 2517
2508 2518 # full versions are inserted when the needed deltas
2509 2519 # become comparable to the uncompressed text
2510 2520 if rawtext is None:
2511 2521 # need rawtext size, before changed by flag processors, which is
2512 2522 # the non-raw size. use revlog explicitly to avoid filelog's extra
2513 2523 # logic that might remove metadata size.
2514 2524 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2515 2525 cachedelta[1])
2516 2526 else:
2517 2527 textlen = len(rawtext)
2518 2528
2519 2529 if deltacomputer is None:
2520 2530 deltacomputer = _deltacomputer(self)
2521 2531
2522 2532 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2523 2533
2524 2534 # no delta for flag processor revision (see "candelta" for why)
2525 2535 # not calling candelta since only one revision needs test, also to
2526 2536 # avoid overhead fetching flags again.
2527 2537 if flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
2528 2538 deltainfo = None
2529 2539 else:
2530 2540 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2531 2541
2532 2542 if deltainfo is not None:
2533 2543 base = deltainfo.base
2534 2544 chainbase = deltainfo.chainbase
2535 2545 data = deltainfo.data
2536 2546 l = deltainfo.deltalen
2537 2547 else:
2538 2548 rawtext = deltacomputer.buildtext(revinfo, fh)
2539 2549 data = self.compress(rawtext)
2540 2550 l = len(data[1]) + len(data[0])
2541 2551 base = chainbase = curr
2542 2552
2543 2553 e = (offset_type(offset, flags), l, textlen,
2544 2554 base, link, p1r, p2r, node)
2545 2555 self.index.append(e)
2546 2556 self.nodemap[node] = curr
2547 2557
2548 2558 entry = self._io.packentry(e, self.node, self.version, curr)
2549 2559 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
2550 2560
2551 2561 if alwayscache and rawtext is None:
2552 2562 rawtext = deltacomputer._buildtext(revinfo, fh)
2553 2563
2554 2564 if type(rawtext) == bytes: # only accept immutable objects
2555 2565 self._cache = (node, curr, rawtext)
2556 2566 self._chainbasecache[curr] = chainbase
2557 2567 return node
2558 2568
    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        """Append one packed index entry (and its data chunk) to disk.

        ``data`` is a 2-element sequence of (header, payload) as produced by
        compress(); for inline revlogs the entry and data are interleaved in
        the index file, otherwise they go to separate files.
        """
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            # inline: entry and chunk are interleaved in the index file, so
            # the transaction offset must account for all preceding entries
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self._enforceinlinesize(transaction, ifh)
2589 2599
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.

        Each item in ``deltas`` is a 7-tuple:
        (node, p1, p2, linknode, deltabase, delta, flags).
        Returns the list of nodes seen (including pre-existing ones).
        """

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self._indexfp("a+")
        isize = r * self._io.size
        if self._inline:
            # Inline: data lives in the index file, no separate data handle.
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self._datafp("a+")
        def flush():
            # Make pending writes visible to readers (used by the censor
            # peek, which may need to read the base revision back).
            if dfh:
                dfh.flush()
            ifh.flush()
        try:
            deltacomputer = _deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                     self.node(baserev))

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(node, None, transaction, link,
                                  p1, p2, flags, (baserev, delta),
                                  ifh, dfh,
                                  alwayscache=bool(addrevisioncb),
                                  deltacomputer=deltacomputer)

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp("a+")
                    ifh = self._indexfp("a+")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return nodes
2687 2697
2688 2698 def iscensored(self, rev):
2689 2699 """Check if a file revision is censored."""
2690 2700 if not self._censorable:
2691 2701 return False
2692 2702
2693 2703 return self.flags(rev) & REVIDX_ISCENSORED
2694 2704
2695 2705 def _peek_iscensored(self, baserev, delta, flush):
2696 2706 """Quickly check if a delta produces a censored revision."""
2697 2707 if not self._censorable:
2698 2708 return False
2699 2709
2700 2710 # Fragile heuristic: unless new file meta keys are added alphabetically
2701 2711 # preceding "censored", all censored revisions are prefixed by
2702 2712 # "\1\ncensored:". A delta producing such a censored revision must be a
2703 2713 # full-replacement delta, so we inspect the first and only patch in the
2704 2714 # delta for this prefix.
2705 2715 hlen = struct.calcsize(">lll")
2706 2716 if len(delta) <= hlen:
2707 2717 return False
2708 2718
2709 2719 oldlen = self.rawsize(baserev)
2710 2720 newlen = len(delta) - hlen
2711 2721 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2712 2722 return False
2713 2723
2714 2724 add = "\1\ncensored:"
2715 2725 addlen = len(add)
2716 2726 return newlen >= addlen and delta[hlen:hlen + addlen] == add
2717 2727
2718 2728 def getstrippoint(self, minlink):
2719 2729 """find the minimum rev that must be stripped to strip the linkrev
2720 2730
2721 2731 Returns a tuple containing the minimum rev and a set of all revs that
2722 2732 have linkrevs that will be broken by this strip.
2723 2733 """
2724 2734 brokenrevs = set()
2725 2735 strippoint = len(self)
2726 2736
2727 2737 heads = {}
2728 2738 futurelargelinkrevs = set()
2729 2739 for head in self.headrevs():
2730 2740 headlinkrev = self.linkrev(head)
2731 2741 heads[head] = headlinkrev
2732 2742 if headlinkrev >= minlink:
2733 2743 futurelargelinkrevs.add(headlinkrev)
2734 2744
2735 2745 # This algorithm involves walking down the rev graph, starting at the
2736 2746 # heads. Since the revs are topologically sorted according to linkrev,
2737 2747 # once all head linkrevs are below the minlink, we know there are
2738 2748 # no more revs that could have a linkrev greater than minlink.
2739 2749 # So we can stop walking.
2740 2750 while futurelargelinkrevs:
2741 2751 strippoint -= 1
2742 2752 linkrev = heads.pop(strippoint)
2743 2753
2744 2754 if linkrev < minlink:
2745 2755 brokenrevs.add(strippoint)
2746 2756 else:
2747 2757 futurelargelinkrevs.remove(linkrev)
2748 2758
2749 2759 for p in self.parentrevs(strippoint):
2750 2760 if p != nullrev:
2751 2761 plinkrev = self.linkrev(p)
2752 2762 heads[p] = plinkrev
2753 2763 if plinkrev >= minlink:
2754 2764 futurelargelinkrevs.add(plinkrev)
2755 2765
2756 2766 return strippoint, brokenrevs
2757 2767
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            # Nothing stored, nothing to strip.
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            # No revision has linkrev >= minlink; revlog is untouched.
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            # Inline: index entries and data are interleaved, so the
            # truncation point covers both.
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in pycompat.xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]
        self._nodepos = None
2798 2808
    def checksize(self):
        """Compare on-disk file sizes against what the index implies.

        Returns a ``(dd, di)`` pair: ``dd`` is the difference between the
        actual and expected data-file size, ``di`` the same for the index
        file. Both are 0 when the revlog is internally consistent; a
        missing file counts as 0 for its component.
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, 2)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # No data file (e.g. inline revlog): no data discrepancy.
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                # For inline revlogs the index file also carries the revision
                # data, so recompute the discrepancy accounting for it.
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
2834 2844
2835 2845 def files(self):
2836 2846 res = [self.indexfile]
2837 2847 if not self._inline:
2838 2848 res.append(self.datafile)
2839 2849 return res
2840 2850
    # Delta-reuse policies accepted by clone()'s ``deltareuse`` argument;
    # see the clone() docstring for the semantics of each mode.
    DELTAREUSEALWAYS = 'always'
    DELTAREUSESAMEREVS = 'samerevs'
    DELTAREUSENEVER = 'never'

    # Re-add every revision through the full addrevision() path.
    DELTAREUSEFULLADD = 'fulladd'

    # The set of all valid ``deltareuse`` values, used for validation.
    DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2848 2858
    def clone(self, tr, destrevlog, addrevisioncb=None,
              deltareuse=DELTAREUSESAMEREVS, deltabothparents=None):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument control how deltas from the existing revlog
        are preserved in the destination revlog. The argument can have the
        following values:

        DELTAREUSEALWAYS
           Deltas will always be reused (if possible), even if the destination
           revlog would not select the same revisions for the delta. This is the
           fastest mode of operation.
        DELTAREUSESAMEREVS
           Deltas will be reused if the destination revlog would pick the same
           revisions for the delta. This mode strikes a balance between speed
           and optimization.
        DELTAREUSENEVER
           Deltas will never be reused. This is the slowest mode of execution.
           This mode can be used to recompute deltas (e.g. if the diff/delta
           algorithm changes).

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``deltabothparents`` argument
        controls whether to compute deltas against both parents for merges.
        By default, the current default is used.

        Raises ValueError when ``deltareuse`` is invalid, the destination is
        not empty, or either revlog has filtered revisions.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

        if len(destrevlog):
            raise ValueError(_('destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_('source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_('destination revlog has filtered revisions'))

        # lazydeltabase controls whether to reuse a cached delta, if possible.
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False

            destrevlog._deltabothparents = deltabothparents or oldamd

            populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                self.DELTAREUSESAMEREVS)

            deltacomputer = _deltacomputer(destrevlog)
            index = self.index
            for rev in self:
                entry = index[rev]

                # Some classes override linkrev to take filtered revs into
                # account. Use raw entry from index.
                flags = entry[0] & 0xffff
                linkrev = entry[4]
                # entry[5]/entry[6] are parent revs; field 7 is the node.
                p1 = index[entry[5]][7]
                p2 = index[entry[6]][7]
                node = entry[7]

                # (Possibly) reuse the delta from the revlog if allowed and
                # the revlog chunk is a delta.
                cachedelta = None
                rawtext = None
                if populatecachedelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.revision(rev, raw=True)


                if deltareuse == self.DELTAREUSEFULLADD:
                    destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
                                           cachedelta=cachedelta,
                                           node=node, flags=flags,
                                           deltacomputer=deltacomputer)
                else:
                    ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                            checkambig=False)
                    dfh = None
                    if not destrevlog._inline:
                        dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                    try:
                        destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
                                                p2, flags, cachedelta, ifh, dfh,
                                                deltacomputer=deltacomputer)
                    finally:
                        if dfh:
                            dfh.close()
                        ifh.close()

                if addrevisioncb:
                    addrevisioncb(self, rev, node)
        finally:
            # Restore the destination's original delta settings even on error.
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd
@@ -1,402 +1,402
1 1 #require no-reposimplestore
2 2
3 3 Check whether size of generaldelta revlog is not bigger than its
4 4 regular equivalent. Test would fail if generaldelta was naive
5 5 implementation of parentdelta: third manifest revision would be fully
6 6 inserted due to big distance from its parent revision (zero).
7 7
8 8 $ hg init repo --config format.generaldelta=no --config format.usegeneraldelta=no
9 9 $ cd repo
10 10 $ echo foo > foo
11 11 $ echo bar > bar
12 12 $ echo baz > baz
13 13 $ hg commit -q -Am boo
14 14 $ hg clone --pull . ../gdrepo -q --config format.generaldelta=yes
15 15 $ for r in 1 2 3; do
16 16 > echo $r > foo
17 17 > hg commit -q -m $r
18 18 > hg up -q -r 0
19 19 > hg pull . -q -r $r -R ../gdrepo
20 20 > done
21 21
22 22 $ cd ..
23 23 >>> from __future__ import print_function
24 24 >>> import os
25 25 >>> regsize = os.stat("repo/.hg/store/00manifest.i").st_size
26 26 >>> gdsize = os.stat("gdrepo/.hg/store/00manifest.i").st_size
27 27 >>> if regsize < gdsize:
28 28 ... print('generaldata increased size of manifest')
29 29
30 30 Verify rev reordering doesn't create invalid bundles (issue4462)
31 31 This requires a commit tree that when pulled will reorder manifest revs such
32 32 that the second manifest to create a file rev will be ordered before the first
33 33 manifest to create that file rev. We also need to do a partial pull to ensure
34 34 reordering happens. At the end we verify the linkrev points at the earliest
35 35 commit.
36 36
37 37 $ hg init server --config format.generaldelta=True
38 38 $ cd server
39 39 $ touch a
40 40 $ hg commit -Aqm a
41 41 $ echo x > x
42 42 $ echo y > y
43 43 $ hg commit -Aqm xy
44 44 $ hg up -q '.^'
45 45 $ echo x > x
46 46 $ echo z > z
47 47 $ hg commit -Aqm xz
48 48 $ hg up -q 1
49 49 $ echo b > b
50 50 $ hg commit -Aqm b
51 51 $ hg merge -q 2
52 52 $ hg commit -Aqm merge
53 53 $ echo c > c
54 54 $ hg commit -Aqm c
55 55 $ hg log -G -T '{rev} {shortest(node)} {desc}'
56 56 @ 5 ebb8 c
57 57 |
58 58 o 4 baf7 merge
59 59 |\
60 60 | o 3 a129 b
61 61 | |
62 62 o | 2 958c xz
63 63 | |
64 64 | o 1 f00c xy
65 65 |/
66 66 o 0 3903 a
67 67
68 68 $ cd ..
69 69 $ hg init client --config format.generaldelta=false --config format.usegeneraldelta=false
70 70 $ cd client
71 71 $ hg pull -q ../server -r 4
72 72 $ hg debugdeltachain x
73 73 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
74 74 0 1 1 -1 base 3 2 3 1.50000 3 0 0.00000
75 75
76 76 $ cd ..
77 77
78 78 Test "usegeneraldelta" config
79 79 (repo are general delta, but incoming bundle are not re-deltafied)
80 80
81 81 delta coming from the server base delta server are not recompressed.
82 82 (also include the aggressive version for comparison)
83 83
84 84 $ hg clone repo --pull --config format.usegeneraldelta=1 usegd
85 85 requesting all changes
86 86 adding changesets
87 87 adding manifests
88 88 adding file changes
89 89 added 4 changesets with 6 changes to 3 files (+2 heads)
90 90 new changesets 0ea3fcf9d01d:bba78d330d9c
91 91 updating to branch default
92 92 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
93 93 $ hg clone repo --pull --config format.generaldelta=1 full
94 94 requesting all changes
95 95 adding changesets
96 96 adding manifests
97 97 adding file changes
98 98 added 4 changesets with 6 changes to 3 files (+2 heads)
99 99 new changesets 0ea3fcf9d01d:bba78d330d9c
100 100 updating to branch default
101 101 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
102 102 $ hg -R repo debugdeltachain -m
103 103 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
104 104 0 1 1 -1 base 104 135 104 0.77037 104 0 0.00000
105 105 1 1 2 0 prev 57 135 161 1.19259 161 0 0.00000
106 106 2 1 3 1 prev 57 135 218 1.61481 218 0 0.00000
107 107 3 2 1 -1 base 104 135 104 0.77037 104 0 0.00000
108 108 $ hg -R usegd debugdeltachain -m
109 109 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
110 110 0 1 1 -1 base 104 135 104 0.77037 104 0 0.00000
111 111 1 1 2 0 p1 57 135 161 1.19259 161 0 0.00000
112 112 2 1 3 1 prev 57 135 218 1.61481 218 0 0.00000
113 113 3 1 2 0 p1 57 135 161 1.19259 275 114 0.70807
114 114 $ hg -R full debugdeltachain -m
115 115 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
116 116 0 1 1 -1 base 104 135 104 0.77037 104 0 0.00000
117 117 1 1 2 0 p1 57 135 161 1.19259 161 0 0.00000
118 118 2 1 2 0 p1 57 135 161 1.19259 218 57 0.35404
119 119 3 1 2 0 p1 57 135 161 1.19259 275 114 0.70807
120 120
121 121 Test revlog.optimize-delta-parent-choice
122 122
123 123 $ hg init --config format.generaldelta=1 aggressive
124 124 $ cd aggressive
125 125 $ cat << EOF >> .hg/hgrc
126 126 > [format]
127 127 > generaldelta = 1
128 128 > EOF
129 129 $ touch a b c d e
130 130 $ hg commit -Aqm side1
131 131 $ hg up -q null
132 132 $ touch x y
133 133 $ hg commit -Aqm side2
134 134
135 135 - Verify non-aggressive merge uses p1 (commit 1) as delta parent
136 136 $ hg merge -q 0
137 137 $ hg commit -q -m merge
138 138 $ hg debugdeltachain -m
139 139 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
140 140 0 1 1 -1 base 59 215 59 0.27442 59 0 0.00000
141 141 1 1 2 0 prev 61 86 120 1.39535 120 0 0.00000
142 142 2 1 2 0 p2 62 301 121 0.40199 182 61 0.50413
143 143
144 144 $ hg strip -q -r . --config extensions.strip=
145 145
146 146 - Verify aggressive merge uses p2 (commit 0) as delta parent
147 147 $ hg up -q -C 1
148 148 $ hg merge -q 0
149 149 $ hg commit -q -m merge --config storage.revlog.optimize-delta-parent-choice=yes
150 150 $ hg debugdeltachain -m
151 151 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
152 152 0 1 1 -1 base 59 215 59 0.27442 59 0 0.00000
153 153 1 1 2 0 prev 61 86 120 1.39535 120 0 0.00000
154 154 2 1 2 0 p2 62 301 121 0.40199 182 61 0.50413
155 155
156 156 Test that strip bundle use bundle2
157 157 $ hg --config extensions.strip= strip .
158 158 0 files updated, 0 files merged, 5 files removed, 0 files unresolved
159 159 saved backup bundle to $TESTTMP/aggressive/.hg/strip-backup/1c5d4dc9a8b8-6c68e60c-backup.hg
160 160 $ hg debugbundle .hg/strip-backup/*
161 161 Stream params: {Compression: BZ}
162 162 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
163 163 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9
164 164 cache:rev-branch-cache -- {} (mandatory: False)
165 165 phase-heads -- {} (mandatory: True)
166 166 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9 draft
167 167
168 168 $ cd ..
169 169
170 170 test maxdeltachainspan
171 171
172 172 $ hg init source-repo
173 173 $ cd source-repo
174 174 $ hg debugbuilddag --new-file '.+5:brancha$.+11:branchb$.+30:branchc<brancha+2<branchb+2'
175 175 # add an empty revision somewhere
176 176 $ hg up tip
177 177 14 files updated, 0 files merged, 0 files removed, 0 files unresolved
178 178 $ hg rm .
179 179 removing nf10
180 180 removing nf11
181 181 removing nf12
182 182 removing nf13
183 183 removing nf14
184 184 removing nf15
185 185 removing nf16
186 186 removing nf17
187 187 removing nf51
188 188 removing nf52
189 189 removing nf6
190 190 removing nf7
191 191 removing nf8
192 192 removing nf9
193 193 $ hg commit -m 'empty all'
194 194 $ hg revert --all --rev 'p1(.)'
195 195 adding nf10
196 196 adding nf11
197 197 adding nf12
198 198 adding nf13
199 199 adding nf14
200 200 adding nf15
201 201 adding nf16
202 202 adding nf17
203 203 adding nf51
204 204 adding nf52
205 205 adding nf6
206 206 adding nf7
207 207 adding nf8
208 208 adding nf9
209 209 $ hg commit -m 'restore all'
210 210 $ hg up null
211 211 0 files updated, 0 files merged, 14 files removed, 0 files unresolved
212 212 $
213 213 $ cd ..
214 214 $ hg -R source-repo debugdeltachain -m
215 215 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
216 216 0 1 1 -1 base 46 45 46 1.02222 46 0 0.00000
217 217 1 1 2 0 p1 57 90 103 1.14444 103 0 0.00000
218 218 2 1 3 1 p1 57 135 160 1.18519 160 0 0.00000
219 219 3 1 4 2 p1 57 180 217 1.20556 217 0 0.00000
220 220 4 1 5 3 p1 57 225 274 1.21778 274 0 0.00000
221 221 5 1 6 4 p1 57 270 331 1.22593 331 0 0.00000
222 222 6 2 1 -1 base 46 45 46 1.02222 46 0 0.00000
223 223 7 2 2 6 p1 57 90 103 1.14444 103 0 0.00000
224 224 8 2 3 7 p1 57 135 160 1.18519 160 0 0.00000
225 225 9 2 4 8 p1 57 180 217 1.20556 217 0 0.00000
226 226 10 2 5 9 p1 58 226 275 1.21681 275 0 0.00000
227 227 11 2 6 10 p1 58 272 333 1.22426 333 0 0.00000
228 228 12 2 7 11 p1 58 318 391 1.22956 391 0 0.00000
229 229 13 2 8 12 p1 58 364 449 1.23352 449 0 0.00000
230 230 14 2 9 13 p1 58 410 507 1.23659 507 0 0.00000
231 231 15 2 10 14 p1 58 456 565 1.23904 565 0 0.00000
232 232 16 2 11 15 p1 58 502 623 1.24104 623 0 0.00000
233 233 17 2 12 16 p1 58 548 681 1.24270 681 0 0.00000
234 234 18 3 1 -1 base 47 46 47 1.02174 47 0 0.00000
235 235 19 3 2 18 p1 58 92 105 1.14130 105 0 0.00000
236 236 20 3 3 19 p1 58 138 163 1.18116 163 0 0.00000
237 237 21 3 4 20 p1 58 184 221 1.20109 221 0 0.00000
238 238 22 3 5 21 p1 58 230 279 1.21304 279 0 0.00000
239 239 23 3 6 22 p1 58 276 337 1.22101 337 0 0.00000
240 240 24 3 7 23 p1 58 322 395 1.22671 395 0 0.00000
241 241 25 3 8 24 p1 58 368 453 1.23098 453 0 0.00000
242 242 26 3 9 25 p1 58 414 511 1.23430 511 0 0.00000
243 243 27 3 10 26 p1 58 460 569 1.23696 569 0 0.00000
244 244 28 3 11 27 p1 58 506 627 1.23913 627 0 0.00000
245 245 29 3 12 28 p1 58 552 685 1.24094 685 0 0.00000
246 246 30 3 13 29 p1 58 598 743 1.24247 743 0 0.00000
247 247 31 3 14 30 p1 58 644 801 1.24379 801 0 0.00000
248 248 32 3 15 31 p1 58 690 859 1.24493 859 0 0.00000
249 249 33 3 16 32 p1 58 736 917 1.24592 917 0 0.00000
250 250 34 3 17 33 p1 58 782 975 1.24680 975 0 0.00000
251 251 35 3 18 34 p1 58 828 1033 1.24758 1033 0 0.00000
252 252 36 3 19 35 p1 58 874 1091 1.24828 1091 0 0.00000
253 253 37 3 20 36 p1 58 920 1149 1.24891 1149 0 0.00000
254 254 38 3 21 37 p1 58 966 1207 1.24948 1207 0 0.00000
255 255 39 3 22 38 p1 58 1012 1265 1.25000 1265 0 0.00000
256 256 40 3 23 39 p1 58 1058 1323 1.25047 1323 0 0.00000
257 257 41 3 24 40 p1 58 1104 1381 1.25091 1381 0 0.00000
258 258 42 3 25 41 p1 58 1150 1439 1.25130 1439 0 0.00000
259 259 43 3 26 42 p1 58 1196 1497 1.25167 1497 0 0.00000
260 260 44 3 27 43 p1 58 1242 1555 1.25201 1555 0 0.00000
261 261 45 3 28 44 p1 58 1288 1613 1.25233 1613 0 0.00000
262 262 46 3 29 45 p1 58 1334 1671 1.25262 1671 0 0.00000
263 263 47 3 30 46 p1 58 1380 1729 1.25290 1729 0 0.00000
264 264 48 3 31 47 p1 58 1426 1787 1.25316 1787 0 0.00000
265 265 49 4 1 -1 base 197 316 197 0.62342 197 0 0.00000
266 266 50 4 2 49 p1 58 362 255 0.70442 255 0 0.00000
267 267 51 4 3 50 prev 356 594 611 1.02862 611 0 0.00000
268 268 52 4 4 51 p1 58 640 669 1.04531 669 0 0.00000
269 269 53 5 1 -1 base 0 0 0 0.00000 0 0 0.00000
270 54 5 2 53 p1 376 640 376 0.58750 376 0 0.00000
270 54 6 1 -1 base 369 640 369 0.57656 369 0 0.00000
271 271 $ hg clone --pull source-repo --config experimental.maxdeltachainspan=2800 relax-chain --config format.generaldelta=yes
272 272 requesting all changes
273 273 adding changesets
274 274 adding manifests
275 275 adding file changes
276 276 added 55 changesets with 53 changes to 53 files (+2 heads)
277 277 new changesets 61246295ee1e:c930ac4a5b32
278 278 updating to branch default
279 279 14 files updated, 0 files merged, 0 files removed, 0 files unresolved
280 280 $ hg -R relax-chain debugdeltachain -m
281 281 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
282 282 0 1 1 -1 base 46 45 46 1.02222 46 0 0.00000
283 283 1 1 2 0 p1 57 90 103 1.14444 103 0 0.00000
284 284 2 1 3 1 p1 57 135 160 1.18519 160 0 0.00000
285 285 3 1 4 2 p1 57 180 217 1.20556 217 0 0.00000
286 286 4 1 5 3 p1 57 225 274 1.21778 274 0 0.00000
287 287 5 1 6 4 p1 57 270 331 1.22593 331 0 0.00000
288 288 6 2 1 -1 base 46 45 46 1.02222 46 0 0.00000
289 289 7 2 2 6 p1 57 90 103 1.14444 103 0 0.00000
290 290 8 2 3 7 p1 57 135 160 1.18519 160 0 0.00000
291 291 9 2 4 8 p1 57 180 217 1.20556 217 0 0.00000
292 292 10 2 5 9 p1 58 226 275 1.21681 275 0 0.00000
293 293 11 2 6 10 p1 58 272 333 1.22426 333 0 0.00000
294 294 12 2 7 11 p1 58 318 391 1.22956 391 0 0.00000
295 295 13 2 8 12 p1 58 364 449 1.23352 449 0 0.00000
296 296 14 2 9 13 p1 58 410 507 1.23659 507 0 0.00000
297 297 15 2 10 14 p1 58 456 565 1.23904 565 0 0.00000
298 298 16 2 11 15 p1 58 502 623 1.24104 623 0 0.00000
299 299 17 2 12 16 p1 58 548 681 1.24270 681 0 0.00000
300 300 18 3 1 -1 base 47 46 47 1.02174 47 0 0.00000
301 301 19 3 2 18 p1 58 92 105 1.14130 105 0 0.00000
302 302 20 3 3 19 p1 58 138 163 1.18116 163 0 0.00000
303 303 21 3 4 20 p1 58 184 221 1.20109 221 0 0.00000
304 304 22 3 5 21 p1 58 230 279 1.21304 279 0 0.00000
305 305 23 3 6 22 p1 58 276 337 1.22101 337 0 0.00000
306 306 24 3 7 23 p1 58 322 395 1.22671 395 0 0.00000
307 307 25 3 8 24 p1 58 368 453 1.23098 453 0 0.00000
308 308 26 3 9 25 p1 58 414 511 1.23430 511 0 0.00000
309 309 27 3 10 26 p1 58 460 569 1.23696 569 0 0.00000
310 310 28 3 11 27 p1 58 506 627 1.23913 627 0 0.00000
311 311 29 3 12 28 p1 58 552 685 1.24094 685 0 0.00000
312 312 30 3 13 29 p1 58 598 743 1.24247 743 0 0.00000
313 313 31 3 14 30 p1 58 644 801 1.24379 801 0 0.00000
314 314 32 3 15 31 p1 58 690 859 1.24493 859 0 0.00000
315 315 33 3 16 32 p1 58 736 917 1.24592 917 0 0.00000
316 316 34 3 17 33 p1 58 782 975 1.24680 975 0 0.00000
317 317 35 3 18 34 p1 58 828 1033 1.24758 1033 0 0.00000
318 318 36 3 19 35 p1 58 874 1091 1.24828 1091 0 0.00000
319 319 37 3 20 36 p1 58 920 1149 1.24891 1149 0 0.00000
320 320 38 3 21 37 p1 58 966 1207 1.24948 1207 0 0.00000
321 321 39 3 22 38 p1 58 1012 1265 1.25000 1265 0 0.00000
322 322 40 3 23 39 p1 58 1058 1323 1.25047 1323 0 0.00000
323 323 41 3 24 40 p1 58 1104 1381 1.25091 1381 0 0.00000
324 324 42 3 25 41 p1 58 1150 1439 1.25130 1439 0 0.00000
325 325 43 3 26 42 p1 58 1196 1497 1.25167 1497 0 0.00000
326 326 44 3 27 43 p1 58 1242 1555 1.25201 1555 0 0.00000
327 327 45 3 28 44 p1 58 1288 1613 1.25233 1613 0 0.00000
328 328 46 3 29 45 p1 58 1334 1671 1.25262 1671 0 0.00000
329 329 47 3 30 46 p1 58 1380 1729 1.25290 1729 0 0.00000
330 330 48 3 31 47 p1 58 1426 1787 1.25316 1787 0 0.00000
331 331 49 4 1 -1 base 197 316 197 0.62342 197 0 0.00000
332 332 50 4 2 49 p1 58 362 255 0.70442 255 0 0.00000
333 333 51 2 13 17 p1 58 594 739 1.24411 2781 2042 2.76319
334 334 52 5 1 -1 base 369 640 369 0.57656 369 0 0.00000
335 335 53 6 1 -1 base 0 0 0 0.00000 0 0 0.00000
336 54 6 2 53 p1 376 640 376 0.58750 376 0 0.00000
336 54 7 1 -1 base 369 640 369 0.57656 369 0 0.00000
337 337 $ hg clone --pull source-repo --config experimental.maxdeltachainspan=0 noconst-chain --config format.generaldelta=yes
338 338 requesting all changes
339 339 adding changesets
340 340 adding manifests
341 341 adding file changes
342 342 added 55 changesets with 53 changes to 53 files (+2 heads)
343 343 new changesets 61246295ee1e:c930ac4a5b32
344 344 updating to branch default
345 345 14 files updated, 0 files merged, 0 files removed, 0 files unresolved
346 346 $ hg -R noconst-chain debugdeltachain -m
347 347 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
348 348 0 1 1 -1 base 46 45 46 1.02222 46 0 0.00000
349 349 1 1 2 0 p1 57 90 103 1.14444 103 0 0.00000
350 350 2 1 3 1 p1 57 135 160 1.18519 160 0 0.00000
351 351 3 1 4 2 p1 57 180 217 1.20556 217 0 0.00000
352 352 4 1 5 3 p1 57 225 274 1.21778 274 0 0.00000
353 353 5 1 6 4 p1 57 270 331 1.22593 331 0 0.00000
354 354 6 2 1 -1 base 46 45 46 1.02222 46 0 0.00000
355 355 7 2 2 6 p1 57 90 103 1.14444 103 0 0.00000
356 356 8 2 3 7 p1 57 135 160 1.18519 160 0 0.00000
357 357 9 2 4 8 p1 57 180 217 1.20556 217 0 0.00000
358 358 10 2 5 9 p1 58 226 275 1.21681 275 0 0.00000
359 359 11 2 6 10 p1 58 272 333 1.22426 333 0 0.00000
360 360 12 2 7 11 p1 58 318 391 1.22956 391 0 0.00000
361 361 13 2 8 12 p1 58 364 449 1.23352 449 0 0.00000
362 362 14 2 9 13 p1 58 410 507 1.23659 507 0 0.00000
363 363 15 2 10 14 p1 58 456 565 1.23904 565 0 0.00000
364 364 16 2 11 15 p1 58 502 623 1.24104 623 0 0.00000
365 365 17 2 12 16 p1 58 548 681 1.24270 681 0 0.00000
366 366 18 3 1 -1 base 47 46 47 1.02174 47 0 0.00000
367 367 19 3 2 18 p1 58 92 105 1.14130 105 0 0.00000
368 368 20 3 3 19 p1 58 138 163 1.18116 163 0 0.00000
369 369 21 3 4 20 p1 58 184 221 1.20109 221 0 0.00000
370 370 22 3 5 21 p1 58 230 279 1.21304 279 0 0.00000
371 371 23 3 6 22 p1 58 276 337 1.22101 337 0 0.00000
372 372 24 3 7 23 p1 58 322 395 1.22671 395 0 0.00000
373 373 25 3 8 24 p1 58 368 453 1.23098 453 0 0.00000
374 374 26 3 9 25 p1 58 414 511 1.23430 511 0 0.00000
375 375 27 3 10 26 p1 58 460 569 1.23696 569 0 0.00000
376 376 28 3 11 27 p1 58 506 627 1.23913 627 0 0.00000
377 377 29 3 12 28 p1 58 552 685 1.24094 685 0 0.00000
378 378 30 3 13 29 p1 58 598 743 1.24247 743 0 0.00000
379 379 31 3 14 30 p1 58 644 801 1.24379 801 0 0.00000
380 380 32 3 15 31 p1 58 690 859 1.24493 859 0 0.00000
381 381 33 3 16 32 p1 58 736 917 1.24592 917 0 0.00000
382 382 34 3 17 33 p1 58 782 975 1.24680 975 0 0.00000
383 383 35 3 18 34 p1 58 828 1033 1.24758 1033 0 0.00000
384 384 36 3 19 35 p1 58 874 1091 1.24828 1091 0 0.00000
385 385 37 3 20 36 p1 58 920 1149 1.24891 1149 0 0.00000
386 386 38 3 21 37 p1 58 966 1207 1.24948 1207 0 0.00000
387 387 39 3 22 38 p1 58 1012 1265 1.25000 1265 0 0.00000
388 388 40 3 23 39 p1 58 1058 1323 1.25047 1323 0 0.00000
389 389 41 3 24 40 p1 58 1104 1381 1.25091 1381 0 0.00000
390 390 42 3 25 41 p1 58 1150 1439 1.25130 1439 0 0.00000
391 391 43 3 26 42 p1 58 1196 1497 1.25167 1497 0 0.00000
392 392 44 3 27 43 p1 58 1242 1555 1.25201 1555 0 0.00000
393 393 45 3 28 44 p1 58 1288 1613 1.25233 1613 0 0.00000
394 394 46 3 29 45 p1 58 1334 1671 1.25262 1671 0 0.00000
395 395 47 3 30 46 p1 58 1380 1729 1.25290 1729 0 0.00000
396 396 48 3 31 47 p1 58 1426 1787 1.25316 1787 0 0.00000
397 397 49 1 7 5 p1 58 316 389 1.23101 2857 2468 6.34447
398 398 50 1 8 49 p1 58 362 447 1.23481 2915 2468 5.52125
399 399 51 2 13 17 p1 58 594 739 1.24411 2642 1903 2.57510
400 400 52 2 14 51 p1 58 640 797 1.24531 2700 1903 2.38770
401 401 53 4 1 -1 base 0 0 0 0.00000 0 0 0.00000
402 54 4 2 53 p1 376 640 376 0.58750 376 0 0.00000
402 54 5 1 -1 base 369 640 369 0.57656 369 0 0.00000
General Comments 0
You need to be logged in to leave comments. Login now