delta-find: drop the temporary indent...
marmoute
r52230:ac8b798e default
@@ -1,1713 +1,1712 @@
1 1 # revlogdeltas.py - Logic around delta computation for revlog
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2018 Octobus <contact@octobus.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 """Helper class to compute deltas stored inside revlogs"""
9 9
10 10
11 11 import collections
12 12 import struct
13 13
14 14 # import stuff from node for others to import from revlog
15 15 from ..node import nullrev
16 16 from ..i18n import _
17 17
18 18 from .constants import (
19 19 COMP_MODE_DEFAULT,
20 20 COMP_MODE_INLINE,
21 21 COMP_MODE_PLAIN,
22 22 DELTA_BASE_REUSE_FORCE,
23 23 DELTA_BASE_REUSE_NO,
24 24 KIND_CHANGELOG,
25 25 KIND_FILELOG,
26 26 KIND_MANIFESTLOG,
27 27 REVIDX_ISCENSORED,
28 28 REVIDX_RAWTEXT_CHANGING_FLAGS,
29 29 )
30 30
31 31 from ..thirdparty import attr
32 32
33 33 from .. import (
34 34 error,
35 35 mdiff,
36 36 util,
37 37 )
38 38
39 39 from . import flagutil
40 40
41 41 # maximum <delta-chain-data>/<revision-text-length> ratio
42 42 LIMIT_DELTA2TEXT = 2
43 43
44 44
45 45 class _testrevlog:
46 46 """minimalist fake revlog to use in doctests"""
47 47
48 48 def __init__(self, data, density=0.5, mingap=0, snapshot=()):
49 49 """data is a list of revision payload boundaries"""
50 50 from .. import revlog
51 51
52 52 self._data = data
53 53 self.data_config = revlog.DataConfig()
54 54 self.data_config.sr_density_threshold = density
55 55 self.data_config.sr_min_gap_size = mingap
56 56 self.delta_config = revlog.DeltaConfig()
57 57 self.feature_config = revlog.FeatureConfig()
58 58 self._snapshot = set(snapshot)
59 59 self.index = None
60 60
61 61 def start(self, rev):
62 62 if rev == nullrev:
63 63 return 0
64 64 if rev == 0:
65 65 return 0
66 66 return self._data[rev - 1]
67 67
68 68 def end(self, rev):
69 69 if rev == nullrev:
70 70 return 0
71 71 return self._data[rev]
72 72
73 73 def length(self, rev):
74 74 return self.end(rev) - self.start(rev)
75 75
76 76 def __len__(self):
77 77 return len(self._data)
78 78
79 79 def issnapshot(self, rev):
80 80 if rev == nullrev:
81 81 return True
82 82 return rev in self._snapshot
83 83
84 84
85 85 def slicechunk(revlog, revs, targetsize=None):
86 86 """slice revs to reduce the amount of unrelated data to be read from disk.
87 87
88 88 ``revs`` is sliced into groups that should be read in one go.
89 89 Assume that revs are sorted.
90 90
91 91 The initial chunk is sliced until the overall density (payload/chunks-span
92 92 ratio) is above `revlog.data_config.sr_density_threshold`. No gap smaller
93 93 than `revlog.data_config.sr_min_gap_size` is skipped.
94 94
95 95 If `targetsize` is set, no chunk larger than `targetsize` will be yielded.
96 96 For consistency with the other slicing choices, this limit won't go lower
97 97 than `revlog.data_config.sr_min_gap_size`.
98 98 
99 99 If an individual revision's chunk is larger than this limit, it will still
100 100 be yielded individually.
101 101
102 102 >>> data = [
103 103 ... 5, #00 (5)
104 104 ... 10, #01 (5)
105 105 ... 12, #02 (2)
106 106 ... 12, #03 (empty)
107 107 ... 27, #04 (15)
108 108 ... 31, #05 (4)
109 109 ... 31, #06 (empty)
110 110 ... 42, #07 (11)
111 111 ... 47, #08 (5)
112 112 ... 47, #09 (empty)
113 113 ... 48, #10 (1)
114 114 ... 51, #11 (3)
115 115 ... 74, #12 (23)
116 116 ... 85, #13 (11)
117 117 ... 86, #14 (1)
118 118 ... 91, #15 (5)
119 119 ... ]
120 120 >>> revlog = _testrevlog(data, snapshot=range(16))
121 121
122 122 >>> list(slicechunk(revlog, list(range(16))))
123 123 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
124 124 >>> list(slicechunk(revlog, [0, 15]))
125 125 [[0], [15]]
126 126 >>> list(slicechunk(revlog, [0, 11, 15]))
127 127 [[0], [11], [15]]
128 128 >>> list(slicechunk(revlog, [0, 11, 13, 15]))
129 129 [[0], [11, 13, 15]]
130 130 >>> list(slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
131 131 [[1, 2], [5, 8, 10, 11], [14]]
132 132
133 133 Slicing with a maximum chunk size
134 134 >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
135 135 [[0], [11], [13], [15]]
136 136 >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
137 137 [[0], [11], [13, 15]]
138 138
139 139 Slicing involving nullrev
140 140 >>> list(slicechunk(revlog, [-1, 0, 11, 13, 15], targetsize=20))
141 141 [[-1, 0], [11], [13, 15]]
142 142 >>> list(slicechunk(revlog, [-1, 13, 15], targetsize=5))
143 143 [[-1], [13], [15]]
144 144 """
145 145 if targetsize is not None:
146 146 targetsize = max(targetsize, revlog.data_config.sr_min_gap_size)
147 147 # targetsize should not be specified when evaluating delta candidates:
148 148 # * targetsize is used to ensure we stay within specification when reading,
149 149 densityslicing = getattr(revlog.index, 'slicechunktodensity', None)
150 150 if densityslicing is None:
151 151 densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z)
152 152 for chunk in densityslicing(
153 153 revs,
154 154 revlog.data_config.sr_density_threshold,
155 155 revlog.data_config.sr_min_gap_size,
156 156 ):
157 157 for subchunk in _slicechunktosize(revlog, chunk, targetsize):
158 158 yield subchunk
159 159
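
The density decision above boils down to a payload-over-span ratio. A minimal sketch of that ratio, assuming the cumulative-offset layout used by the `_testrevlog` doctests (`read_density` is an illustrative helper, not revlog API):

def read_density(boundaries, revs):
    """payload/span ratio for `revs`; boundaries[r] is the end offset
    of revision r, as in _testrevlog"""
    start = boundaries[revs[0] - 1] if revs[0] > 0 else 0
    span = boundaries[revs[-1]] - start
    payload = sum(
        boundaries[r] - (boundaries[r - 1] if r > 0 else 0) for r in revs
    )
    return payload / span if span else 1.0

# With the doctest data, revs [0, 15] cover a 91-byte span for only 10
# bytes of payload (density ~0.11), which is why slicechunk splits them.
data = [5, 10, 12, 12, 27, 31, 31, 42, 47, 47, 48, 51, 74, 85, 86, 91]
assert read_density(data, [0, 15]) < 0.5
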
160 160
161 161 def _slicechunktosize(revlog, revs, targetsize=None):
162 162 """slice revs to match the target size
163 163
164 164 This is intended to be used on chunks that density slicing selected but
165 165 that are still too large compared to the read guarantee of the revlog.
166 166 This might happen when the "minimal gap size" interrupted the slicing or
167 167 when chains are built in a way that creates large blocks next to each other.
168 168
169 169 >>> data = [
170 170 ... 3, #0 (3)
171 171 ... 5, #1 (2)
172 172 ... 6, #2 (1)
173 173 ... 8, #3 (2)
174 174 ... 8, #4 (empty)
175 175 ... 11, #5 (3)
176 176 ... 12, #6 (1)
177 177 ... 13, #7 (1)
178 178 ... 14, #8 (1)
179 179 ... ]
180 180
181 181 == All snapshots cases ==
182 182 >>> revlog = _testrevlog(data, snapshot=range(9))
183 183
184 184 Cases where chunk is already small enough
185 185 >>> list(_slicechunktosize(revlog, [0], 3))
186 186 [[0]]
187 187 >>> list(_slicechunktosize(revlog, [6, 7], 3))
188 188 [[6, 7]]
189 189 >>> list(_slicechunktosize(revlog, [0], None))
190 190 [[0]]
191 191 >>> list(_slicechunktosize(revlog, [6, 7], None))
192 192 [[6, 7]]
193 193
194 194 Cases where we need actual slicing
195 195 >>> list(_slicechunktosize(revlog, [0, 1], 3))
196 196 [[0], [1]]
197 197 >>> list(_slicechunktosize(revlog, [1, 3], 3))
198 198 [[1], [3]]
199 199 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
200 200 [[1, 2], [3]]
201 201 >>> list(_slicechunktosize(revlog, [3, 5], 3))
202 202 [[3], [5]]
203 203 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
204 204 [[3], [5]]
205 205 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
206 206 [[5], [6, 7, 8]]
207 207 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
208 208 [[0], [1, 2], [3], [5], [6, 7, 8]]
209 209
210 210 Case with too large individual chunk (must return valid chunk)
211 211 >>> list(_slicechunktosize(revlog, [0, 1], 2))
212 212 [[0], [1]]
213 213 >>> list(_slicechunktosize(revlog, [1, 3], 1))
214 214 [[1], [3]]
215 215 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
216 216 [[3], [5]]
217 217
218 218 == No Snapshot cases ==
219 219 >>> revlog = _testrevlog(data)
220 220
221 221 Cases where chunk is already small enough
222 222 >>> list(_slicechunktosize(revlog, [0], 3))
223 223 [[0]]
224 224 >>> list(_slicechunktosize(revlog, [6, 7], 3))
225 225 [[6, 7]]
226 226 >>> list(_slicechunktosize(revlog, [0], None))
227 227 [[0]]
228 228 >>> list(_slicechunktosize(revlog, [6, 7], None))
229 229 [[6, 7]]
230 230
231 231 Cases where we need actual slicing
232 232 >>> list(_slicechunktosize(revlog, [0, 1], 3))
233 233 [[0], [1]]
234 234 >>> list(_slicechunktosize(revlog, [1, 3], 3))
235 235 [[1], [3]]
236 236 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
237 237 [[1], [2, 3]]
238 238 >>> list(_slicechunktosize(revlog, [3, 5], 3))
239 239 [[3], [5]]
240 240 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
241 241 [[3], [4, 5]]
242 242 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
243 243 [[5], [6, 7, 8]]
244 244 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
245 245 [[0], [1, 2], [3], [5], [6, 7, 8]]
246 246
247 247 Case with too large individual chunk (must return valid chunk)
248 248 >>> list(_slicechunktosize(revlog, [0, 1], 2))
249 249 [[0], [1]]
250 250 >>> list(_slicechunktosize(revlog, [1, 3], 1))
251 251 [[1], [3]]
252 252 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
253 253 [[3], [5]]
254 254
255 255 == mixed case ==
256 256 >>> revlog = _testrevlog(data, snapshot=[0, 1, 2])
257 257 >>> list(_slicechunktosize(revlog, list(range(9)), 5))
258 258 [[0, 1], [2], [3, 4, 5], [6, 7, 8]]
259 259 """
260 260 assert targetsize is None or 0 <= targetsize
261 261 startdata = revlog.start(revs[0])
262 262 enddata = revlog.end(revs[-1])
263 263 fullspan = enddata - startdata
264 264 if targetsize is None or fullspan <= targetsize:
265 265 yield revs
266 266 return
267 267
268 268 startrevidx = 0
269 269 endrevidx = 1
270 270 iterrevs = enumerate(revs)
271 271 next(iterrevs) # skip first rev.
272 272 # first step: get snapshots out of the way
273 273 for idx, r in iterrevs:
274 274 span = revlog.end(r) - startdata
275 275 snapshot = revlog.issnapshot(r)
276 276 if span <= targetsize and snapshot:
277 277 endrevidx = idx + 1
278 278 else:
279 279 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
280 280 if chunk:
281 281 yield chunk
282 282 startrevidx = idx
283 283 startdata = revlog.start(r)
284 284 endrevidx = idx + 1
285 285 if not snapshot:
286 286 break
287 287
288 288 # for the others, we use binary slicing to quickly converge toward valid
289 289 # chunks (otherwise, we might end up looking for start/end of many
290 290 # revisions). This logic is not looking for the perfect slicing point, it
291 291 # focuses on quickly converging toward valid chunks.
292 292 nbitem = len(revs)
293 293 while (enddata - startdata) > targetsize:
294 294 endrevidx = nbitem
295 295 if nbitem - startrevidx <= 1:
296 296 break # protect against individual chunk larger than limit
297 297 localenddata = revlog.end(revs[endrevidx - 1])
298 298 span = localenddata - startdata
299 299 while span > targetsize:
300 300 if endrevidx - startrevidx <= 1:
301 301 break # protect against individual chunk larger than limit
302 302 endrevidx -= (endrevidx - startrevidx) // 2
303 303 localenddata = revlog.end(revs[endrevidx - 1])
304 304 span = localenddata - startdata
305 305 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
306 306 if chunk:
307 307 yield chunk
308 308 startrevidx = endrevidx
309 309 startdata = revlog.start(revs[startrevidx])
310 310
311 311 chunk = _trimchunk(revlog, revs, startrevidx)
312 312 if chunk:
313 313 yield chunk
314 314
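
Both phases above (snapshots peeled off first, then binary convergence on the rest) are visible in the mixed-case doctest; a small driver reusing that fixture, for quick experimentation:

revlog = _testrevlog(
    [3, 5, 6, 8, 8, 11, 12, 13, 14], snapshot=[0, 1, 2]
)
# snapshots 0-2 are grouped while their span fits the 5-byte target;
# the remaining revisions are halved until every chunk fits as well.
chunks = list(_slicechunktosize(revlog, list(range(9)), 5))
assert chunks == [[0, 1], [2], [3, 4, 5], [6, 7, 8]]
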
315 315
316 316 def _slicechunktodensity(revlog, revs, targetdensity=0.5, mingapsize=0):
317 317 """slice revs to reduce the amount of unrelated data to be read from disk.
318 318
319 319 ``revs`` is sliced into groups that should be read in one go.
320 320 Assume that revs are sorted.
321 321
322 322 The initial chunk is sliced until the overall density (payload/chunks-span
323 323 ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
324 324 skipped.
325 325
326 326 >>> revlog = _testrevlog([
327 327 ... 5, #00 (5)
328 328 ... 10, #01 (5)
329 329 ... 12, #02 (2)
330 330 ... 12, #03 (empty)
331 331 ... 27, #04 (15)
332 332 ... 31, #05 (4)
333 333 ... 31, #06 (empty)
334 334 ... 42, #07 (11)
335 335 ... 47, #08 (5)
336 336 ... 47, #09 (empty)
337 337 ... 48, #10 (1)
338 338 ... 51, #11 (3)
339 339 ... 74, #12 (23)
340 340 ... 85, #13 (11)
341 341 ... 86, #14 (1)
342 342 ... 91, #15 (5)
343 343 ... ])
344 344
345 345 >>> list(_slicechunktodensity(revlog, list(range(16))))
346 346 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
347 347 >>> list(_slicechunktodensity(revlog, [0, 15]))
348 348 [[0], [15]]
349 349 >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
350 350 [[0], [11], [15]]
351 351 >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
352 352 [[0], [11, 13, 15]]
353 353 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
354 354 [[1, 2], [5, 8, 10, 11], [14]]
355 355 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
356 356 ... mingapsize=20))
357 357 [[1, 2, 3, 5, 8, 10, 11], [14]]
358 358 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
359 359 ... targetdensity=0.95))
360 360 [[1, 2], [5], [8, 10, 11], [14]]
361 361 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
362 362 ... targetdensity=0.95, mingapsize=12))
363 363 [[1, 2], [5, 8, 10, 11], [14]]
364 364 """
365 365 start = revlog.start
366 366 length = revlog.length
367 367
368 368 if len(revs) <= 1:
369 369 yield revs
370 370 return
371 371
372 372 deltachainspan = segmentspan(revlog, revs)
373 373
374 374 if deltachainspan < mingapsize:
375 375 yield revs
376 376 return
377 377
378 378 readdata = deltachainspan
379 379 chainpayload = sum(length(r) for r in revs)
380 380
381 381 if deltachainspan:
382 382 density = chainpayload / float(deltachainspan)
383 383 else:
384 384 density = 1.0
385 385
386 386 if density >= targetdensity:
387 387 yield revs
388 388 return
389 389
390 390 # Collect the gaps so they can be consumed from largest to smallest
391 391 gaps = []
392 392 prevend = None
393 393 for i, rev in enumerate(revs):
394 394 revstart = start(rev)
395 395 revlen = length(rev)
396 396
397 397 # Skip empty revisions to form larger holes
398 398 if revlen == 0:
399 399 continue
400 400
401 401 if prevend is not None:
402 402 gapsize = revstart - prevend
403 403 # only consider holes that are large enough
404 404 if gapsize > mingapsize:
405 405 gaps.append((gapsize, i))
406 406
407 407 prevend = revstart + revlen
408 408 # sort the gaps to pop them from largest to smallest
409 409 gaps.sort()
410 410
411 411 # Collect the indices of the largest holes until the density is acceptable
412 412 selected = []
413 413 while gaps and density < targetdensity:
414 414 gapsize, gapidx = gaps.pop()
415 415
416 416 selected.append(gapidx)
417 417
418 418 # the gaps were sorted in ascending order, so popping from the end
419 419 # removes the largest remaining gap first
420 420 readdata -= gapsize
421 421 if readdata > 0:
422 422 density = chainpayload / float(readdata)
423 423 else:
424 424 density = 1.0
425 425 selected.sort()
426 426
427 427 # Cut the revs at collected indices
428 428 previdx = 0
429 429 for idx in selected:
430 430
431 431 chunk = _trimchunk(revlog, revs, previdx, idx)
432 432 if chunk:
433 433 yield chunk
434 434
435 435 previdx = idx
436 436
437 437 chunk = _trimchunk(revlog, revs, previdx)
438 438 if chunk:
439 439 yield chunk
440 440
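
The loop above is a greedy gap removal: sort the gaps ascending, pop the largest, and recount density with that hole excluded from the read span. The bookkeeping in isolation (illustrative names, no revlog needed):

def select_gaps(payload, span, gaps, targetdensity):
    """return indices of gaps to cut at so that payload/read meets
    targetdensity; `gaps` holds (size, index) pairs as built above"""
    gaps = sorted(gaps)
    read = span
    selected = []
    while gaps and (payload / read if read else 1.0) < targetdensity:
        gapsize, gapidx = gaps.pop()  # largest remaining gap
        selected.append(gapidx)
        read -= gapsize
    return sorted(selected)

# a 50-byte span holding 20 bytes of payload, with holes of 20 and 10
# bytes: skipping the 20-byte hole alone lifts density to 20/30 ~ 0.67.
assert select_gaps(20, 50, [(20, 3), (10, 7)], 0.5) == [3]
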
441 441
442 442 def _trimchunk(revlog, revs, startidx, endidx=None):
443 443 """returns revs[startidx:endidx] without empty trailing revs
444 444
445 445 Doctest Setup
446 446 >>> revlog = _testrevlog([
447 447 ... 5, #0
448 448 ... 10, #1
449 449 ... 12, #2
450 450 ... 12, #3 (empty)
451 451 ... 17, #4
452 452 ... 21, #5
453 453 ... 21, #6 (empty)
454 454 ... ])
455 455
456 456 Contiguous cases:
457 457 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
458 458 [0, 1, 2, 3, 4, 5]
459 459 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
460 460 [0, 1, 2, 3, 4]
461 461 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
462 462 [0, 1, 2]
463 463 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
464 464 [2]
465 465 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
466 466 [3, 4, 5]
467 467 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
468 468 [3, 4]
469 469
470 470 Discontiguous cases:
471 471 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
472 472 [1, 3, 5]
473 473 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
474 474 [1]
475 475 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
476 476 [3, 5]
477 477 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
478 478 [3, 5]
479 479 """
480 480 length = revlog.length
481 481
482 482 if endidx is None:
483 483 endidx = len(revs)
484 484
485 485 # If we have a non-empty delta candidate, there is nothing to trim
486 486 if revs[endidx - 1] < len(revlog):
487 487 # Trim empty revs at the end, except the very first revision of a chain
488 488 while (
489 489 endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0
490 490 ):
491 491 endidx -= 1
492 492
493 493 return revs[startidx:endidx]
494 494
495 495
496 496 def segmentspan(revlog, revs):
497 497 """Get the byte span of a segment of revisions
498 498
499 499 revs is a sorted array of revision numbers
500 500
501 501 >>> revlog = _testrevlog([
502 502 ... 5, #0
503 503 ... 10, #1
504 504 ... 12, #2
505 505 ... 12, #3 (empty)
506 506 ... 17, #4
507 507 ... ])
508 508
509 509 >>> segmentspan(revlog, [0, 1, 2, 3, 4])
510 510 17
511 511 >>> segmentspan(revlog, [0, 4])
512 512 17
513 513 >>> segmentspan(revlog, [3, 4])
514 514 5
515 515 >>> segmentspan(revlog, [1, 2, 3,])
516 516 7
517 517 >>> segmentspan(revlog, [1, 3])
518 518 7
519 519 """
520 520 if not revs:
521 521 return 0
522 522 end = revlog.end(revs[-1])
523 523 return end - revlog.start(revs[0])
524 524
525 525
526 526 def _textfromdelta(revlog, baserev, delta, p1, p2, flags, expectednode):
527 527 """build full text from a (base, delta) pair and other metadata"""
528 528 # special case deltas which replace entire base; no need to decode
529 529 # base revision. this neatly avoids censored bases, which throw when
530 530 # they're decoded.
531 531 hlen = struct.calcsize(b">lll")
532 532 if delta[:hlen] == mdiff.replacediffheader(
533 533 revlog.rawsize(baserev), len(delta) - hlen
534 534 ):
535 535 fulltext = delta[hlen:]
536 536 else:
537 537 # deltabase is rawtext before changed by flag processors, which is
538 538 # equivalent to non-raw text
539 539 basetext = revlog.revision(baserev)
540 540 fulltext = mdiff.patch(basetext, delta)
541 541
542 542 try:
543 543 validatehash = flagutil.processflagsraw(revlog, fulltext, flags)
544 544 if validatehash:
545 545 revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
546 546 if flags & REVIDX_ISCENSORED:
547 547 raise error.StorageError(
548 548 _(b'node %s is not censored') % expectednode
549 549 )
550 550 except error.CensoredNodeError:
551 551 # must pass the censored index flag to add censored revisions
552 552 if not flags & REVIDX_ISCENSORED:
553 553 raise
554 554 return fulltext
555 555
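
The header shortcut above leans on the bdiff hunk encoding: three big-endian 32-bit integers (start, end, length of new data) followed by the data, so a delta replacing the whole base starts with (0, baselen, newlen). A self-contained sketch of recognizing such a delta, assuming that is what mdiff.replacediffheader packs:

import struct

def replace_header(baselen, newlen):
    # one hunk rewriting bytes [0, baselen) of the base with newlen bytes
    return struct.pack(b">lll", 0, baselen, newlen)

base = b"old content"
new = b"completely new text"
delta = replace_header(len(base), len(new)) + new

hlen = struct.calcsize(b">lll")
if delta[:hlen] == replace_header(len(base), len(delta) - hlen):
    fulltext = delta[hlen:]  # the base never needs to be decoded
assert fulltext == new
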
556 556
557 557 @attr.s(slots=True, frozen=True)
558 558 class _deltainfo:
559 559 distance = attr.ib()
560 560 deltalen = attr.ib()
561 561 data = attr.ib()
562 562 base = attr.ib()
563 563 chainbase = attr.ib()
564 564 chainlen = attr.ib()
565 565 compresseddeltalen = attr.ib()
566 566 snapshotdepth = attr.ib()
567 567
568 568
569 569 def drop_u_compression(delta):
570 570 """turn a "u" (no-compression) delta into no-compression without header
571 571
572 572 This is useful for revlog formats that have a better compression method.
573 573 """
574 574 assert delta.data[0] == b'u', delta.data[0]
575 575 return _deltainfo(
576 576 delta.distance,
577 577 delta.deltalen - 1,
578 578 (b'', delta.data[1]),
579 579 delta.base,
580 580 delta.chainbase,
581 581 delta.chainlen,
582 582 delta.compresseddeltalen,
583 583 delta.snapshotdepth,
584 584 )
585 585
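
Concretely, a revision stored without compression carries a one-byte b'u' marker, and dropping it shaves one byte off deltalen. A tiny illustration with a hand-built _deltainfo (the field values are placeholders):

info = _deltainfo(
    distance=10,
    deltalen=6,  # b'u' marker + 5 payload bytes
    data=(b'u', b'hello'),
    base=3,
    chainbase=0,
    chainlen=2,
    compresseddeltalen=6,
    snapshotdepth=None,
)
plain = drop_u_compression(info)
assert plain.data == (b'', b'hello')
assert plain.deltalen == 5
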
586 586
587 587 # If a revision's full text is that much bigger than a base candidate full
588 588 # text's, it is very unlikely that it will produce a valid delta. We no longer
589 589 # consider these candidates.
590 590 LIMIT_BASE2TEXT = 500
591 591
592 592
593 593 class _DeltaSearch:
594 594 """perform the search of a good delta for a single revlog revision
595 595
596 596 note: some of the deltacomputer.finddeltainfo logic should probably move
597 597 here.
598 598 """
599 599
600 600 def __init__(
601 601 self,
602 602 revlog,
603 603 revinfo,
604 604 p1,
605 605 p2,
606 606 cachedelta,
607 607 excluded_bases=None,
608 608 target_rev=None,
609 609 snapshot_cache=None,
610 610 ):
611 611 # the DELTA_BASE_REUSE_FORCE case should have been taken care of sooner
612 612 # so we should never end up asking such a question. Adding the assert as
613 613 # a safe-guard to detect anything that would be fishy in this regard.
614 614 assert (
615 615 cachedelta is None
616 616 or cachedelta[2] != DELTA_BASE_REUSE_FORCE
617 617 or not revlog.delta_config.general_delta
618 618 )
619 619 self.revlog = revlog
620 620 self.revinfo = revinfo
621 621 self.textlen = revinfo.textlen
622 622 self.p1 = p1
623 623 self.p2 = p2
624 624 self.cachedelta = cachedelta
625 625 self.excluded_bases = excluded_bases
626 626 if target_rev is None:
627 627 target_rev = len(self.revlog)
628 628 self.target_rev = target_rev
629 629 if snapshot_cache is None:
630 630 # map: base-rev: [snapshot-revs]
631 631 snapshot_cache = SnapshotCache()
632 632 self.snapshot_cache = snapshot_cache
633 633
634 634 self.tested = {nullrev}
635 635
636 636 self._candidates_iterator = self._candidate_groups()
637 637 self._last_good = None
638 638 self.current_group = self._candidates_iterator.send(self._last_good)
639 639
640 640 @property
641 641 def done(self):
642 642 """True when all possible candidates have been tested"""
643 643 return self.current_group is None
644 644
645 645 def next_group(self, good_delta=None):
646 646 """move to the next group to test
647 647
648 648 The group of revisions to test will be available in
649 649 `self.current_group`. If the previous group had any good delta, the
650 650 best one can be passed as the `good_delta` parameter to help selecting
651 651 the next group.
652 652
653 653 If no revision remains to be tested, `self.done` will be True and
654 654 `self.current_group` will be None.
655 655 """
656 656 if good_delta is not None:
657 657 self._last_good = good_delta.base
658 658 self.current_group = self._candidates_iterator.send(self._last_good)
659 659
660 660 def _candidate_groups(self):
661 661 """Provides groups of revisions to be tested as delta bases
662 662
663 663 This top level function focuses on emitting groups with unique and
664 664 worthwhile content. See _raw_candidate_groups for details about the
665 665 group order.
666 666 """
667 667 # should we try to build a delta?
668 668 if not (len(self.revlog) and self.revlog._storedeltachains):
669 669 yield None
670 670 return
671 671
672 672 if not self.revlog.delta_config.general_delta:
673 673 # before general delta, there is only one possible delta base
674 674 yield (self.target_rev - 1,)
675 675 yield None
676 676 return
677 677
678 678 good = None
679 679
680 680 group_chunk_size = self.revlog.delta_config.candidate_group_chunk_size
681 681
682 682 tested = self.tested # prefetch for speed and code compactness
683 683 candidates = self._refined_groups()
684 684 while True:
685 685 temptative = candidates.send(good)
686 686 if temptative is None:
687 687 break
688 688 group = self._pre_filter_candidate_revs(temptative)
689 689 if group:
690 690 # When the size of the candidate group is big, it can result in
691 691 # a quite significant performance impact. To reduce this, we
692 692 # can send them in smaller batches until the new batch does not
693 693 # provide any improvements.
694 694 #
695 695 # This might reduce the overall efficiency of the compression
696 696 # in some corner cases, but that should also prevent very
697 697 # pathological cases from being an issue. (eg. 20 000
698 698 # candidates).
699 699 #
700 700 # XXX note that the ordering of the group becomes important as
701 701 # it now impacts the final result. The current order is
702 702 # unprocessed and can be improved.
703 703 if group_chunk_size == 0:
704 704 tested.update(group)
705 705 good = yield tuple(group)
706 706 else:
707 707 prev_good = good
708 708 for start in range(0, len(group), group_chunk_size):
709 709 sub_group = group[start : start + group_chunk_size]
710 710 tested.update(sub_group)
711 711 good = yield tuple(sub_group)
712 712 if prev_good == good:
713 713 break
714 714
715 715 yield None
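
The batching policy described in the comments above, feeding candidates chunk by chunk and stopping once a whole chunk fails to improve on the best delta so far, has a simple shape once the generator send() protocol is stripped away (`best_in_batches` and `evaluate` are illustrative stand-ins):

def best_in_batches(candidates, evaluate, chunk_size):
    """scan candidates in fixed-size chunks, stopping when a chunk
    brings no improvement over the best score seen so far"""
    best = None
    for start in range(0, len(candidates), chunk_size):
        prev_best = best
        for cand in candidates[start : start + chunk_size]:
            score = evaluate(cand)
            if best is None or score < best:
                best = score
        if prev_best == best:
            break  # this chunk changed nothing; stop searching
    return best

# smaller is better: the batch (8, 9) does not beat 2, so (7, 6) is
# never even evaluated.
assert best_in_batches([5, 2, 8, 9, 7, 6], lambda x: x, 2) == 2
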
716 716
717 717 def _pre_filter_candidate_revs(self, temptative):
718 718 """filter possible candidate before computing a delta
719 719
720 720 This function uses various criteria to pre-filter candidate delta bases
721 721 before we compute a delta and evaluate its quality.
722 722 
723 723 Such pre-filtering limits the number of computed deltas, an expensive operation.
724 724 
725 725 Returns the updated list of revisions to test.
726 726 """
727 727 deltalength = self.revlog.length
728 728 deltaparent = self.revlog.deltaparent
729 729
730 730 tested = self.tested
731 731 group = []
732 732 for rev in temptative:
733 733 # skip over empty deltas (no need to include them in a chain)
734 734 while not (rev == nullrev or rev in tested or deltalength(rev)):
735 735 tested.add(rev)
736 736 rev = deltaparent(rev)
737 737 if self._pre_filter_rev(rev):
738 738 group.append(rev)
739 739 else:
740 740 self.tested.add(rev)
741 741 return group
742 742
743 743 def _pre_filter_rev(self, rev):
744 744 """return True if it seems okay to test a rev, False otherwise"""
745 if True:
746 # no need to try a delta against nullrev, this will be done as
747 # a last resort.
748 if rev == nullrev:
749 return False
750 # filter out revision we tested already
751 if rev in self.tested:
752 return False
745 # no need to try a delta against nullrev, this will be done as
746 # a last resort.
747 if rev == nullrev:
748 return False
749 # filter out revision we tested already
750 if rev in self.tested:
751 return False
753 752
754 # a higher authority deemed the base unworthy (e.g. censored)
755 if self.excluded_bases is not None and rev in self.excluded_bases:
756 return False
757 # We are in some recomputation cases and that rev is too high
758 # in the revlog
759 if self.target_rev is not None and rev >= self.target_rev:
760 return False
753 # a higher authority deemed the base unworthy (e.g. censored)
754 if self.excluded_bases is not None and rev in self.excluded_bases:
755 return False
756 # We are in some recomputation cases and that rev is too high
757 # in the revlog
758 if self.target_rev is not None and rev >= self.target_rev:
759 return False
761 760
762 deltas_limit = self.revinfo.textlen * LIMIT_DELTA2TEXT
763 # filter out delta base that will never produce good delta
764 #
765 # if the delta of that base is already bigger than the limit
766 # for the delta chain size, doing a delta is hopeless.
767 if deltas_limit < self.revlog.length(rev):
768 return False
761 deltas_limit = self.revinfo.textlen * LIMIT_DELTA2TEXT
762 # filter out delta base that will never produce good delta
763 #
764 # if the delta of that base is already bigger than the limit
765 # for the delta chain size, doing a delta is hopeless.
766 if deltas_limit < self.revlog.length(rev):
767 return False
769 768
770 sparse = self.revlog.delta_config.sparse_revlog
771 # if the revision we test against is too small, the resulting delta
772 # will be large anyway as that amount of data to be added is big
773 if sparse and self.revlog.rawsize(rev) < (
774 self.textlen // LIMIT_BASE2TEXT
775 ):
776 return False
769 sparse = self.revlog.delta_config.sparse_revlog
770 # if the revision we test against is too small, the resulting delta
771 # will be large anyway as that amount of data to be added is big
772 if sparse and self.revlog.rawsize(rev) < (
773 self.textlen // LIMIT_BASE2TEXT
774 ):
775 return False
777 776
778 # no delta for rawtext-changing revs (see "candelta" for why)
779 if self.revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
780 return False
777 # no delta for rawtext-changing revs (see "candelta" for why)
778 if self.revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
779 return False
781 780
782 # If we reach here, we are about to build and test a delta.
783 # The delta building process will compute the chaininfo in all
784 # case, since that computation is cached, it is fine to access
785 # it here too.
786 chainlen, chainsize = self.revlog._chaininfo(rev)
787 # if the chain would become too long, skip base
788 if (
789 self.revlog.delta_config.max_chain_len
790 and chainlen >= self.revlog.delta_config.max_chain_len
791 ):
792 return False
793 # if the chain already has too much data, skip base
794 if deltas_limit < chainsize:
795 return False
796 if sparse and self.revlog.delta_config.upper_bound_comp is not None:
797 maxcomp = self.revlog.delta_config.upper_bound_comp
798 basenotsnap = (self.p1, self.p2, nullrev)
799 if rev not in basenotsnap and self.revlog.issnapshot(rev):
800 snapshotdepth = self.revlog.snapshotdepth(rev)
801 # If text is significantly larger than the base, we can
802 # expect the resulting delta to be proportional to the size
803 # difference
804 revsize = self.revlog.rawsize(rev)
805 rawsizedistance = max(self.textlen - revsize, 0)
806 # use an estimate of the compression upper bound.
807 lowestrealisticdeltalen = rawsizedistance // maxcomp
781 # If we reach here, we are about to build and test a delta.
782 # The delta building process will compute the chaininfo in all
783 # case, since that computation is cached, it is fine to access
784 # it here too.
785 chainlen, chainsize = self.revlog._chaininfo(rev)
786 # if the chain would become too long, skip base
787 if (
788 self.revlog.delta_config.max_chain_len
789 and chainlen >= self.revlog.delta_config.max_chain_len
790 ):
791 return False
792 # if the chain already has too much data, skip base
793 if deltas_limit < chainsize:
794 return False
795 if sparse and self.revlog.delta_config.upper_bound_comp is not None:
796 maxcomp = self.revlog.delta_config.upper_bound_comp
797 basenotsnap = (self.p1, self.p2, nullrev)
798 if rev not in basenotsnap and self.revlog.issnapshot(rev):
799 snapshotdepth = self.revlog.snapshotdepth(rev)
800 # If text is significantly larger than the base, we can
801 # expect the resulting delta to be proportional to the size
802 # difference
803 revsize = self.revlog.rawsize(rev)
804 rawsizedistance = max(self.textlen - revsize, 0)
805 # use an estimate of the compression upper bound.
806 lowestrealisticdeltalen = rawsizedistance // maxcomp
808 807
809 # check the absolute constraint on the delta size
810 snapshotlimit = self.textlen >> snapshotdepth
811 if snapshotlimit < lowestrealisticdeltalen:
812 # delta lower bound is larger than accepted upper
813 # bound
814 return False
808 # check the absolute constraint on the delta size
809 snapshotlimit = self.textlen >> snapshotdepth
810 if snapshotlimit < lowestrealisticdeltalen:
811 # delta lower bound is larger than accepted upper
812 # bound
813 return False
815 814
816 # check the relative constraint on the delta size
817 revlength = self.revlog.length(rev)
818 if revlength < lowestrealisticdeltalen:
819 # delta probable lower bound is larger than target
820 # base
821 return False
815 # check the relative constraint on the delta size
816 revlength = self.revlog.length(rev)
817 if revlength < lowestrealisticdeltalen:
818 # delta probable lower bound is larger than target
819 # base
820 return False
822 821 return True
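
Two of the cheap filters above are pure arithmetic. With LIMIT_DELTA2TEXT = 2 a chain may weigh at most twice the text it reconstructs, and with LIMIT_BASE2TEXT = 500 a base 500x smaller than the text is skipped (that second cut-off only applies with sparse-revlog enabled). In numbers, as a hypothetical standalone helper:

LIMIT_DELTA2TEXT = 2
LIMIT_BASE2TEXT = 500

def passes_size_filters(textlen, base_rawsize, base_chainsize):
    # the whole chain through this base may not exceed 2x the text
    if textlen * LIMIT_DELTA2TEXT < base_chainsize:
        return False
    # a base much smaller than the text yields a near-full delta anyway
    if base_rawsize < textlen // LIMIT_BASE2TEXT:
        return False
    return True

# for a 1 MB text: a 3 MB chain is hopeless, and so is a 1 KB base
assert not passes_size_filters(1_000_000, 500_000, 3_000_000)
assert not passes_size_filters(1_000_000, 1_000, 500_000)
assert passes_size_filters(1_000_000, 400_000, 500_000)
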
823 822
824 823 def _refined_groups(self):
825 824 good = None
826 825 # First we try to reuse the delta contained in the bundle (or from
827 826 # the source revlog).
828 827 #
829 828 # This logic only applies to general delta repositories and can be
830 829 # disabled through configuration. Disabling reuse source delta is
831 830 # useful when we want to make sure we recomputed "optimal" deltas.
832 831 debug_info = None
833 832 if (
834 833 self.cachedelta is not None
835 834 and self.cachedelta[2] > DELTA_BASE_REUSE_NO
836 835 ):
837 836 # Assume what we received from the server is a good choice
838 837 # build delta will reuse the cache
839 838 if debug_info is not None:
840 839 debug_info['cached-delta.tested'] += 1
841 840 good = yield (self.cachedelta[0],)
842 841 if good is not None:
843 842 if debug_info is not None:
844 843 debug_info['cached-delta.accepted'] += 1
845 844 yield None
846 845 return
847 846 groups = self._raw_groups()
848 847 for candidates in groups:
849 848 good = yield candidates
850 849 if good is not None:
851 850 break
852 851
853 852 # If sparse revlog is enabled, we can try to refine the available
854 853 # deltas
855 854 if not self.revlog.delta_config.sparse_revlog:
856 855 yield None
857 856 return
858 857
859 858 # if we have a refinable value, try to refine it
860 859 if (
861 860 good is not None
862 861 and good not in (self.p1, self.p2)
863 862 and self.revlog.issnapshot(good)
864 863 ):
865 864 # refine snapshot down
866 865 previous = None
867 866 while previous != good:
868 867 previous = good
869 868 base = self.revlog.deltaparent(good)
870 869 if base == nullrev:
871 870 break
872 871 good = yield (base,)
873 872 # refine snapshot up
874 873 if not self.snapshot_cache.snapshots:
875 874 self.snapshot_cache.update(self.revlog, good + 1)
876 875 previous = None
877 876 while good != previous:
878 877 previous = good
879 878 children = tuple(
880 879 sorted(c for c in self.snapshot_cache.snapshots[good])
881 880 )
882 881 good = yield children
883 882
884 883 if debug_info is not None:
885 884 if good is None:
886 885 debug_info['no-solution'] += 1
887 886
888 887 yield None
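
The refine-down step above walks delta parents from an accepted snapshot toward the root, offering each shallower snapshot in turn for as long as it keeps winning. Its shape without the send() protocol (`delta_parent` and `accepts` are illustrative callbacks):

def refine_down(good, delta_parent, accepts):
    """walk delta parents while each shallower base keeps being accepted"""
    previous = None
    while previous != good:
        previous = good
        base = delta_parent(good)
        if base == -1:  # nullrev: cannot refine past the root snapshot
            break
        if accepts(base):
            good = base
    return good

# chain 7 -> 4 -> 2 -> nullrev, where only bases >= 4 give a good delta
parents = {7: 4, 4: 2, 2: -1}
assert refine_down(7, parents.__getitem__, lambda r: r >= 4) == 4
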
889 888
890 889 def _raw_groups(self):
891 890 """Provides groups of revisions to be tested as delta bases
892 891
893 892 This lower level function focuses on emitting theoretically interesting
894 893 deltas without looking into any practical details.
895 894
896 895 The group order aims at providing fast or small candidates first.
897 896 """
898 897 # Why search for a delta base if we cannot use a delta base?
899 898 assert self.revlog.delta_config.general_delta
900 899 # also see issue6056
901 900 sparse = self.revlog.delta_config.sparse_revlog
902 901 prev = self.target_rev - 1
903 902 deltachain = lambda rev: self.revlog._deltachain(rev)[0]
904 903
905 904 # exclude already lazy tested base if any
906 905 parents = [p for p in (self.p1, self.p2) if p != nullrev]
907 906
908 907 if (
909 908 not self.revlog.delta_config.delta_both_parents
910 909 and len(parents) == 2
911 910 ):
912 911 parents.sort()
913 912 # To minimize the chance of having to build a fulltext,
914 913 # pick first whichever parent is closest to us (max rev)
915 914 yield (parents[1],)
916 915 # then the other one (min rev) if the first did not fit
917 916 yield (parents[0],)
918 917 elif len(parents) > 0:
919 918 # Test all parents (1 or 2), and keep the best candidate
920 919 yield parents
921 920
922 921 if sparse and parents:
923 922 # See if an existing snapshot in the parent chains can be used as
924 923 # a base for a new intermediate snapshot
925 924 #
926 925 # search for snapshot in parents delta chain map: snapshot-level:
927 926 # snapshot-rev
928 927 parents_snaps = collections.defaultdict(set)
929 928 candidate_chains = [deltachain(p) for p in parents]
930 929 for chain in candidate_chains:
931 930 for idx, s in enumerate(chain):
932 931 if not self.revlog.issnapshot(s):
933 932 break
934 933 parents_snaps[idx].add(s)
935 934 snapfloor = min(parents_snaps[0]) + 1
936 935 self.snapshot_cache.update(self.revlog, snapfloor)
937 936 # search for the highest "unrelated" revision
938 937 #
939 938 # Adding snapshots used by "unrelated" revisions increases the odds we
940 939 # reuse an independent, yet better, snapshot chain.
941 940 #
942 941 # XXX instead of building a set of revisions, we could lazily
943 942 # enumerate over the chains. That would be more efficient, however
944 943 # we stick to simple code for now.
945 944 all_revs = set()
946 945 for chain in candidate_chains:
947 946 all_revs.update(chain)
948 947 other = None
949 948 for r in self.revlog.revs(prev, snapfloor):
950 949 if r not in all_revs:
951 950 other = r
952 951 break
953 952 if other is not None:
954 953 # To avoid unfair competition, we won't use unrelated
955 954 # intermediate snapshot that are deeper than the ones from the
956 955 # parent delta chain.
957 956 max_depth = max(parents_snaps.keys())
958 957 chain = deltachain(other)
959 958 for depth, s in enumerate(chain):
960 959 if s < snapfloor:
961 960 continue
962 961 if max_depth < depth:
963 962 break
964 963 if not self.revlog.issnapshot(s):
965 964 break
966 965 parents_snaps[depth].add(s)
967 966 # Test them as possible intermediate snapshot bases. We test them
968 967 # from highest to lowest level. High level ones are more likely to
969 968 # result in a small delta.
970 969 floor = None
971 970 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
972 971 siblings = set()
973 972 for s in snaps:
974 973 siblings.update(self.snapshot_cache.snapshots[s])
975 974 # Before considering making a new intermediate snapshot, we
976 975 # check if an existing snapshot, a child of the base we consider,
977 976 # would be suitable.
978 977 #
979 978 # It gives a chance to reuse a delta chain "unrelated" to the
980 979 # current revision instead of starting our own. Without such
981 980 # re-use, topological branches would keep reopening new chains,
982 981 # creating more and more snapshots as the repository grows.
983 982
984 983 if floor is not None:
985 984 # We only do this for siblings created after the one in our
986 985 # parent's delta chain. Those created before have fewer
987 986 # chances to be a valid base since our ancestors had to
988 987 # create a new snapshot.
989 988 siblings = [r for r in siblings if floor < r]
990 989 yield tuple(sorted(siblings))
991 990 # then test the base from our parent's delta chain.
992 991 yield tuple(sorted(snaps))
993 992 floor = min(snaps)
994 993 # No suitable base found in the parent chain, search if any full
995 994 # snapshots emitted since parent's base would be a suitable base
996 995 # for an intermediate snapshot.
997 996 #
998 997 # It gives a chance to reuse a delta chain unrelated to the current
999 998 # revisions instead of starting our own. Without such re-use,
1000 999 # topological branches would keep reopening new full chains,
1001 1000 # creating more and more snapshots as the repository grows.
1002 1001 full = [
1003 1002 r
1004 1003 for r in self.snapshot_cache.snapshots[nullrev]
1005 1004 if snapfloor <= r
1006 1005 ]
1007 1006 yield tuple(sorted(full))
1008 1007
1009 1008 if not sparse:
1010 1009 # other approaches failed; try against prev to hopefully save us a
1011 1010 # fulltext.
1012 1011 yield (prev,)
1013 1012
1014 1013 def is_good_delta_info(self, deltainfo):
1015 1014 """Returns True if the given delta is good. Good means that it is
1016 1015 within the disk span, disk size, and chain length bounds that we know
1017 1016 to be performant."""
1018 1017 if deltainfo is None:
1019 1018 return False
1020 1019
1021 1020 # the DELTA_BASE_REUSE_FORCE case should have been taken care of sooner
1022 1021 # so we should never end up asking such a question. Adding the assert as
1023 1022 # a safe-guard to detect anything that would be fishy in this regard.
1024 1023 assert (
1025 1024 self.revinfo.cachedelta is None
1026 1025 or self.revinfo.cachedelta[2] != DELTA_BASE_REUSE_FORCE
1027 1026 or not self.revlog.delta_config.general_delta
1028 1027 )
1029 1028
1030 1029 # - 'deltainfo.distance' is the distance from the base revision --
1031 1030 # bounding it limits the amount of I/O we need to do.
1032 1031 # - 'deltainfo.compresseddeltalen' is the sum of the total size of
1033 1032 # deltas we need to apply -- bounding it limits the amount of CPU
1034 1033 # we consume.
1035 1034
1036 1035 textlen = self.revinfo.textlen
1037 1036 defaultmax = textlen * 4
1038 1037 maxdist = self.revlog.delta_config.max_deltachain_span
1039 1038 if not maxdist:
1040 1039 maxdist = deltainfo.distance # ensure the conditional pass
1041 1040 maxdist = max(maxdist, defaultmax)
1042 1041
1043 1042 # Bad delta from read span:
1044 1043 #
1045 1044 # If the span of data read is larger than the maximum allowed.
1046 1045 #
1047 1046 # In the sparse-revlog case, we rely on the associated "sparse
1048 1047 # reading" to avoid issue related to the span of data. In theory, it
1049 1048 # would be possible to build pathological revlog where delta pattern
1050 1049 # would lead to too many reads. However, they do not happen in
1051 1050 # practice at all. So we skip the span check entirely.
1052 1051 if (
1053 1052 not self.revlog.delta_config.sparse_revlog
1054 1053 and maxdist < deltainfo.distance
1055 1054 ):
1056 1055 return False
1057 1056
1058 1057 # Bad delta from new delta size:
1059 1058 #
1060 1059 # If the delta size is larger than the target text, storing the delta
1061 1060 # will be inefficient.
1062 1061 if textlen < deltainfo.deltalen:
1063 1062 return False
1064 1063
1065 1064 # Bad delta from cumulated payload size:
1066 1065 #
1067 1066 # If the sum of deltas gets larger than K * target text length.
1068 1067 if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
1069 1068 return False
1070 1069
1071 1070 # Bad delta from chain length:
1072 1071 #
1073 1072 # If the number of deltas in the chain gets too high.
1074 1073 if (
1075 1074 self.revlog.delta_config.max_chain_len
1076 1075 and self.revlog.delta_config.max_chain_len < deltainfo.chainlen
1077 1076 ):
1078 1077 return False
1079 1078
1080 1079 # bad delta from intermediate snapshot size limit
1081 1080 #
1082 1081 # If an intermediate snapshot size is higher than the limit. The
1083 1082 # limit exists to prevent endless chains of intermediate deltas from
1084 1083 # being created.
1085 1084 if (
1086 1085 deltainfo.snapshotdepth is not None
1087 1086 and (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen
1088 1087 ):
1089 1088 return False
1090 1089
1091 1090 # bad delta if new intermediate snapshot is larger than the previous
1092 1091 # snapshot
1093 1092 if (
1094 1093 deltainfo.snapshotdepth
1095 1094 and self.revlog.length(deltainfo.base) < deltainfo.deltalen
1096 1095 ):
1097 1096 return False
1098 1097
1099 1098 return True
1100 1099
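
Note the fallback in the distance check: with max_deltachain_span unset, maxdist becomes the delta's own distance, which neutralizes the span check, and any configured value is floored at 4x the text length so short texts are not over-constrained. The three size checks restated as a hypothetical helper:

LIMIT_DELTA2TEXT = 2

def good_delta_size(textlen, distance, deltalen, chain_payload,
                    max_span=0, sparse=False):
    maxdist = max(max_span or distance, textlen * 4)
    if not sparse and maxdist < distance:
        return False  # too much unrelated data would be read
    if textlen < deltalen:
        return False  # storing the fulltext would be cheaper
    if textlen * LIMIT_DELTA2TEXT < chain_payload:
        return False  # applying the chain would cost too much CPU
    return True

# 10 KB text, 8 KB configured span: a delta 50 KB away fails the span
# check, while the 4 * textlen floor still allows one 35 KB away.
assert not good_delta_size(10_000, 50_000, 2_000, 12_000, max_span=8_000)
assert good_delta_size(10_000, 35_000, 2_000, 12_000, max_span=8_000)
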
1101 1100
1102 1101 class SnapshotCache:
1103 1102 __slots__ = ('snapshots', '_start_rev', '_end_rev')
1104 1103
1105 1104 def __init__(self):
1106 1105 self.snapshots = collections.defaultdict(set)
1107 1106 self._start_rev = None
1108 1107 self._end_rev = None
1109 1108
1110 1109 def update(self, revlog, start_rev=0):
1111 1110 """find snapshots from start_rev to tip"""
1112 1111 nb_revs = len(revlog)
1113 1112 end_rev = nb_revs - 1
1114 1113 if start_rev > end_rev:
1115 1114 return # range is empty
1116 1115
1117 1116 if self._start_rev is None:
1118 1117 assert self._end_rev is None
1119 1118 self._update(revlog, start_rev, end_rev)
1120 1119 elif not (self._start_rev <= start_rev and end_rev <= self._end_rev):
1121 1120 if start_rev < self._start_rev:
1122 1121 self._update(revlog, start_rev, self._start_rev - 1)
1123 1122 if self._end_rev < end_rev:
1124 1123 self._update(revlog, self._end_rev + 1, end_rev)
1125 1124
1126 1125 if self._start_rev is None:
1127 1126 assert self._end_rev is None
1128 1127 self._end_rev = end_rev
1129 1128 self._start_rev = start_rev
1130 1129 else:
1131 1130 self._start_rev = min(self._start_rev, start_rev)
1132 1131 self._end_rev = max(self._end_rev, end_rev)
1133 1132 assert self._start_rev <= self._end_rev, (
1134 1133 self._start_rev,
1135 1134 self._end_rev,
1136 1135 )
1137 1136
1138 1137 def _update(self, revlog, start_rev, end_rev):
1139 1138 """internal method that actually does the content update"""
1140 1139 assert self._start_rev is None or (
1141 1140 start_rev < self._start_rev or start_rev > self._end_rev
1142 1141 ), (self._start_rev, self._end_rev, start_rev, end_rev)
1143 1142 assert self._start_rev is None or (
1144 1143 end_rev < self._start_rev or end_rev > self._end_rev
1145 1144 ), (self._start_rev, self._end_rev, start_rev, end_rev)
1146 1145 cache = self.snapshots
1147 1146 if hasattr(revlog.index, 'findsnapshots'):
1148 1147 revlog.index.findsnapshots(cache, start_rev, end_rev)
1149 1148 else:
1150 1149 deltaparent = revlog.deltaparent
1151 1150 issnapshot = revlog.issnapshot
1152 1151 for rev in revlog.revs(start_rev, end_rev):
1153 1152 if issnapshot(rev):
1154 1153 cache[deltaparent(rev)].add(rev)
1155 1154
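
update() only ever scans revisions that fall outside the [start, end] window already covered, then widens the window. The interval bookkeeping on its own (`scan` is an illustrative callback standing in for _update):

def extend_covered(covered, start, end, scan):
    """scan only the part of [start, end] not yet covered and return
    the widened covered interval"""
    if covered is None:
        scan(start, end)
        return (start, end)
    lo, hi = covered
    if start < lo:
        scan(start, lo - 1)
    if hi < end:
        scan(hi + 1, end)
    return (min(lo, start), max(hi, end))

seen = []
cov = extend_covered(None, 5, 9, lambda a, b: seen.append((a, b)))
cov = extend_covered(cov, 0, 12, lambda a, b: seen.append((a, b)))
# the second call only scanned the two uncovered stretches
assert seen == [(5, 9), (0, 4), (10, 12)] and cov == (0, 12)
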
1156 1155
1157 1156 class deltacomputer:
1158 1157 """object capable of computing deltas and finding deltas for multiple revisions
1159 1158
1160 1159 This object is meant to compute and find multiple deltas applied to the same
1161 1160 revlog.
1162 1161 """
1163 1162
1164 1163 def __init__(
1165 1164 self,
1166 1165 revlog,
1167 1166 write_debug=None,
1168 1167 debug_search=False,
1169 1168 debug_info=None,
1170 1169 ):
1171 1170 self.revlog = revlog
1172 1171 self._write_debug = write_debug
1173 1172 if write_debug is None:
1174 1173 self._debug_search = False
1175 1174 else:
1176 1175 self._debug_search = debug_search
1177 1176 self._debug_info = debug_info
1178 1177 self._snapshot_cache = SnapshotCache()
1179 1178
1180 1179 @property
1181 1180 def _gather_debug(self):
1182 1181 return self._write_debug is not None or self._debug_info is not None
1183 1182
1184 1183 def buildtext(self, revinfo):
1185 1184 """Builds a fulltext version of a revision
1186 1185
1187 1186 revinfo: revisioninfo instance that contains all needed info
1188 1187 """
1189 1188 btext = revinfo.btext
1190 1189 if btext[0] is not None:
1191 1190 return btext[0]
1192 1191
1193 1192 revlog = self.revlog
1194 1193 cachedelta = revinfo.cachedelta
1195 1194 baserev = cachedelta[0]
1196 1195 delta = cachedelta[1]
1197 1196
1198 1197 fulltext = btext[0] = _textfromdelta(
1199 1198 revlog,
1200 1199 baserev,
1201 1200 delta,
1202 1201 revinfo.p1,
1203 1202 revinfo.p2,
1204 1203 revinfo.flags,
1205 1204 revinfo.node,
1206 1205 )
1207 1206 return fulltext
1208 1207
1209 1208 def _builddeltadiff(self, base, revinfo):
1210 1209 revlog = self.revlog
1211 1210 t = self.buildtext(revinfo)
1212 1211 if revlog.iscensored(base):
1213 1212 # deltas based on a censored revision must replace the
1214 1213 # full content in one patch, so delta works everywhere
1215 1214 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
1216 1215 delta = header + t
1217 1216 else:
1218 1217 ptext = revlog.rawdata(base)
1219 1218 delta = mdiff.textdiff(ptext, t)
1220 1219
1221 1220 return delta
1222 1221
1223 1222 def _builddeltainfo(self, revinfo, base, target_rev=None):
1224 1223 # can we use the cached delta?
1225 1224 revlog = self.revlog
1226 1225 chainbase = revlog.chainbase(base)
1227 1226 if revlog.delta_config.general_delta:
1228 1227 deltabase = base
1229 1228 else:
1230 1229 if target_rev is not None and base != target_rev - 1:
1231 1230 msg = (
1232 1231 b'general delta cannot use delta for something else '
1233 1232 b'than `prev`: %d<-%d'
1234 1233 )
1235 1234 msg %= (base, target_rev)
1236 1235 raise error.ProgrammingError(msg)
1237 1236 deltabase = chainbase
1238 1237 snapshotdepth = None
1239 1238 if revlog.delta_config.sparse_revlog and deltabase == nullrev:
1240 1239 snapshotdepth = 0
1241 1240 elif revlog.delta_config.sparse_revlog and revlog.issnapshot(deltabase):
1242 1241 # A delta chain should always be one full snapshot,
1243 1242 # zero or more semi-snapshots, and zero or more deltas
1244 1243 p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
1245 1244 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
1246 1245 snapshotdepth = len(revlog._deltachain(deltabase)[0])
1247 1246 delta = None
1248 1247 if revinfo.cachedelta:
1249 1248 cachebase = revinfo.cachedelta[0]
1250 1249 # check if the diff still applies
1251 1250 currentbase = cachebase
1252 1251 while (
1253 1252 currentbase != nullrev
1254 1253 and currentbase != base
1255 1254 and self.revlog.length(currentbase) == 0
1256 1255 ):
1257 1256 currentbase = self.revlog.deltaparent(currentbase)
1258 1257 if self.revlog.delta_config.lazy_delta and currentbase == base:
1259 1258 delta = revinfo.cachedelta[1]
1260 1259 if delta is None:
1261 1260 delta = self._builddeltadiff(base, revinfo)
1262 1261 if self._debug_search:
1263 1262 msg = b"DBG-DELTAS-SEARCH: uncompressed-delta-size=%d\n"
1264 1263 msg %= len(delta)
1265 1264 self._write_debug(msg)
1266 1265 # snapshotdepth needs to be neither None nor a 0-level snapshot
1267 1266 if revlog.delta_config.upper_bound_comp is not None and snapshotdepth:
1268 1267 lowestrealisticdeltalen = (
1269 1268 len(delta) // revlog.delta_config.upper_bound_comp
1270 1269 )
1271 1270 snapshotlimit = revinfo.textlen >> snapshotdepth
1272 1271 if self._debug_search:
1273 1272 msg = b"DBG-DELTAS-SEARCH: projected-lower-size=%d\n"
1274 1273 msg %= lowestrealisticdeltalen
1275 1274 self._write_debug(msg)
1276 1275 if snapshotlimit < lowestrealisticdeltalen:
1277 1276 if self._debug_search:
1278 1277 msg = b"DBG-DELTAS-SEARCH: DISCARDED (snapshot limit)\n"
1279 1278 self._write_debug(msg)
1280 1279 return None
1281 1280 if revlog.length(base) < lowestrealisticdeltalen:
1282 1281 if self._debug_search:
1283 1282 msg = b"DBG-DELTAS-SEARCH: DISCARDED (prev size)\n"
1284 1283 self._write_debug(msg)
1285 1284 return None
1286 1285 header, data = revlog._inner.compress(delta)
1287 1286 deltalen = len(header) + len(data)
1288 1287 offset = revlog.end(len(revlog) - 1)
1289 1288 dist = deltalen + offset - revlog.start(chainbase)
1290 1289 chainlen, compresseddeltalen = revlog._chaininfo(base)
1291 1290 chainlen += 1
1292 1291 compresseddeltalen += deltalen
1293 1292
1294 1293 return _deltainfo(
1295 1294 dist,
1296 1295 deltalen,
1297 1296 (header, data),
1298 1297 deltabase,
1299 1298 chainbase,
1300 1299 chainlen,
1301 1300 compresseddeltalen,
1302 1301 snapshotdepth,
1303 1302 )
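
The early-discard arithmetic above is worth a number: len(delta) // upper_bound_comp is the smallest the compressed delta could realistically get, and a snapshot candidate is dropped when even that optimistic size exceeds its budget of textlen >> snapshotdepth (each extra depth level halves the budget). As a hypothetical helper:

def worth_compressing(delta_len, textlen, snapshotdepth, max_comp_ratio):
    lowest_realistic = delta_len // max_comp_ratio  # best possible size
    snapshot_limit = textlen >> snapshotdepth  # halves with each level
    return lowest_realistic <= snapshot_limit

# 100 KB text, depth-3 snapshot: the budget is 12.5 KB, so a 200 KB raw
# delta is hopeless even at 10x compression, while 120 KB squeaks by.
assert not worth_compressing(200_000, 100_000, 3, 10)
assert worth_compressing(120_000, 100_000, 3, 10)
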
1304 1303
1305 1304 def _fullsnapshotinfo(self, revinfo, curr):
1306 1305 rawtext = self.buildtext(revinfo)
1307 1306 data = self.revlog._inner.compress(rawtext)
1308 1307 compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
1309 1308 deltabase = chainbase = curr
1310 1309 snapshotdepth = 0
1311 1310 chainlen = 1
1312 1311
1313 1312 return _deltainfo(
1314 1313 dist,
1315 1314 deltalen,
1316 1315 data,
1317 1316 deltabase,
1318 1317 chainbase,
1319 1318 chainlen,
1320 1319 compresseddeltalen,
1321 1320 snapshotdepth,
1322 1321 )
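
A full snapshot is the no-base fallback: it is its own base, has chain length 1 and depth 0, and its cost is just the compressed fulltext. A sketch of that accounting, with zlib standing in for the revlog's configured compression engine:

import zlib

def full_snapshot_cost(rawtext, curr_rev):
    """cost fields for storing rawtext as a full snapshot at curr_rev"""
    header, data = b'', zlib.compress(rawtext)
    size = len(header) + len(data)
    return {
        'deltalen': size,
        'distance': size,  # nothing beyond the snapshot itself is read
        'base': curr_rev,  # a full snapshot is its own base
        'chainlen': 1,
        'compresseddeltalen': size,
        'snapshotdepth': 0,
    }

cost = full_snapshot_cost(b'x' * 4096, 42)
assert cost['base'] == 42 and cost['chainlen'] == 1
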
1323 1322
1324 1323 def finddeltainfo(self, revinfo, excluded_bases=None, target_rev=None):
1325 1324 """Find an acceptable delta against a candidate revision
1326 1325
1327 1326 revinfo: information about the revision (instance of _revisioninfo)
1328 1327
1329 1328 Returns the first acceptable candidate revision, as ordered by
1330 1329 _candidategroups
1331 1330
1332 1331 If no suitable deltabase is found, we return delta info for a full
1333 1332 snapshot.
1334 1333
1335 1334 `excluded_bases` is an optional set of revisions that cannot be used as
1336 1335 a delta base. Use this to recompute deltas suitable in censor or strip
1337 1336 context.
1338 1337 """
1339 1338 if target_rev is None:
1340 1339 target_rev = len(self.revlog)
1341 1340
1342 1341 gather_debug = self._gather_debug
1343 1342 cachedelta = revinfo.cachedelta
1344 1343 revlog = self.revlog
1345 1344 p1r = p2r = None
1346 1345
1347 1346 if excluded_bases is None:
1348 1347 excluded_bases = set()
1349 1348
1350 1349 if gather_debug:
1351 1350 start = util.timer()
1352 1351 dbg = self._one_dbg_data()
1353 1352 dbg['revision'] = target_rev
1354 1353 p1r = revlog.rev(revinfo.p1)
1355 1354 p2r = revlog.rev(revinfo.p2)
1356 1355 if p1r != nullrev:
1357 1356 p1_chain_len = revlog._chaininfo(p1r)[0]
1358 1357 else:
1359 1358 p1_chain_len = -1
1360 1359 if p2r != nullrev:
1361 1360 p2_chain_len = revlog._chaininfo(p2r)[0]
1362 1361 else:
1363 1362 p2_chain_len = -1
1364 1363 dbg['p1-chain-len'] = p1_chain_len
1365 1364 dbg['p2-chain-len'] = p2_chain_len
1366 1365
1367 1366 # 1) if the revision is empty, no amount of delta can beat it
1368 1367 #
1369 1368 # 2) no delta for flag processor revision (see "candelta" for why)
1370 1369 # not calling candelta since only one revision needs testing, also to
1371 1370 # avoid overhead fetching flags again.
1372 1371 if not revinfo.textlen or revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
1373 1372 deltainfo = self._fullsnapshotinfo(revinfo, target_rev)
1374 1373 if gather_debug:
1375 1374 end = util.timer()
1376 1375 dbg['duration'] = end - start
1377 1376 dbg[
1378 1377 'delta-base'
1379 1378 ] = deltainfo.base # pytype: disable=attribute-error
1380 1379 dbg['search_round_count'] = 0
1381 1380 dbg['using-cached-base'] = False
1382 1381 dbg['delta_try_count'] = 0
1383 1382 dbg['type'] = b"full"
1384 1383 dbg['snapshot-depth'] = 0
1385 1384 self._dbg_process_data(dbg)
1386 1385 return deltainfo
1387 1386
1388 1387 deltainfo = None
1389 1388
1390 1389 # If this source delta is to be forcibly reused, let us comply early.
1391 1390 if (
1392 1391 revlog.delta_config.general_delta
1393 1392 and revinfo.cachedelta is not None
1394 1393 and revinfo.cachedelta[2] == DELTA_BASE_REUSE_FORCE
1395 1394 ):
1396 1395 base = revinfo.cachedelta[0]
1397 1396 if base == nullrev:
1398 1397 dbg_type = b"full"
1399 1398 deltainfo = self._fullsnapshotinfo(revinfo, target_rev)
1400 1399 if gather_debug:
1401 1400 snapshotdepth = 0
1402 1401 elif base not in excluded_bases:
1403 1402 delta = revinfo.cachedelta[1]
1404 1403 header, data = revlog.compress(delta)
1405 1404 deltalen = len(header) + len(data)
1406 1405 if gather_debug:
1407 1406 offset = revlog.end(len(revlog) - 1)
1408 1407 chainbase = revlog.chainbase(base)
1409 1408 distance = deltalen + offset - revlog.start(chainbase)
1410 1409 chainlen, compresseddeltalen = revlog._chaininfo(base)
1411 1410 chainlen += 1
1412 1411 compresseddeltalen += deltalen
1413 1412 if base == p1r or base == p2r:
1414 1413 dbg_type = b"delta"
1415 1414 snapshotdepth = None
1416 1415 elif not revlog.issnapshot(base):
1417 1416 snapshotdepth = None
1418 1417 else:
1419 1418 dbg_type = b"snapshot"
1420 1419 snapshotdepth = revlog.snapshotdepth(base) + 1
1421 1420 else:
1422 1421 distance = None
1423 1422 chainbase = None
1424 1423 chainlen = None
1425 1424 compresseddeltalen = None
1426 1425 snapshotdepth = None
1427 1426 deltainfo = _deltainfo(
1428 1427 distance=distance,
1429 1428 deltalen=deltalen,
1430 1429 data=(header, data),
1431 1430 base=base,
1432 1431 chainbase=chainbase,
1433 1432 chainlen=chainlen,
1434 1433 compresseddeltalen=compresseddeltalen,
1435 1434 snapshotdepth=snapshotdepth,
1436 1435 )
1437 1436
1438 1437 if deltainfo is not None:
1439 1438 if gather_debug:
1440 1439 end = util.timer()
1441 1440 dbg['duration'] = end - start
1442 1441 dbg[
1443 1442 'delta-base'
1444 1443 ] = deltainfo.base # pytype: disable=attribute-error
1445 1444 dbg['search_round_count'] = 0
1446 1445 dbg['using-cached-base'] = True
1447 1446 dbg['delta_try_count'] = 0
1448 1447 dbg['type'] = b"full"
1449 1448 if snapshotdepth is None:
1450 1449 dbg['snapshot-depth'] = -1
1451 1450 else:
1452 1451 dbg['snapshot-depth'] = snapshotdepth
1453 1452 self._dbg_process_data(dbg)
1454 1453 return deltainfo
1455 1454
1456 1455 # count the number of different deltas we tried (for debug purposes)
1457 1456 dbg_try_count = 0
1458 1457 # count the number of "search rounds" we did (for debug purposes)
1459 1458 dbg_try_rounds = 0
1460 1459 dbg_type = b'unknown'
1461 1460
1462 1461 if p1r is None:
1463 1462 p1r = revlog.rev(revinfo.p1)
1464 1463 p2r = revlog.rev(revinfo.p2)
1465 1464
1466 1465 if self._debug_search:
1467 1466 msg = b"DBG-DELTAS-SEARCH: SEARCH rev=%d\n"
1468 1467 msg %= target_rev
1469 1468 self._write_debug(msg)
1470 1469
1471 1470 search = _DeltaSearch(
1472 1471 self.revlog,
1473 1472 revinfo,
1474 1473 p1r,
1475 1474 p2r,
1476 1475 cachedelta,
1477 1476 excluded_bases,
1478 1477 target_rev,
1479 1478 snapshot_cache=self._snapshot_cache,
1480 1479 )
1481 1480
1482 1481 while not search.done:
1483 1482 current_group = search.current_group
1484 1483 # current_group can be `None`, but not when search.done is False
1485 1484 # We add this assert to help pytype
1486 1485 assert current_group is not None
1487 1486 candidaterevs = current_group
1488 1487 dbg_try_rounds += 1
            if self._debug_search:
                prev = None
                if deltainfo is not None:
                    prev = deltainfo.base

                if (
                    cachedelta is not None
                    and len(candidaterevs) == 1
                    and cachedelta[0] in candidaterevs
                ):
                    round_type = b"cached-delta"
                elif p1r in candidaterevs or p2r in candidaterevs:
                    round_type = b"parents"
                elif prev is not None and all(c < prev for c in candidaterevs):
                    round_type = b"refine-down"
                elif prev is not None and all(c > prev for c in candidaterevs):
                    round_type = b"refine-up"
                else:
                    round_type = b"search-down"
                msg = b"DBG-DELTAS-SEARCH: ROUND #%d - %d candidates - %s\n"
                msg %= (dbg_try_rounds, len(candidaterevs), round_type)
                self._write_debug(msg)
            nominateddeltas = []
            if deltainfo is not None:
                if self._debug_search:
                    msg = (
                        b"DBG-DELTAS-SEARCH: CONTENDER: rev=%d - length=%d\n"
                    )
                    msg %= (deltainfo.base, deltainfo.deltalen)
                    self._write_debug(msg)
                # if we already found a good delta,
                # challenge it against refined candidates
                nominateddeltas.append(deltainfo)
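            # build a delta against each candidate and nominate the ones the
            # search deems good enough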
            for candidaterev in candidaterevs:
                if self._debug_search:
                    msg = b"DBG-DELTAS-SEARCH: CANDIDATE: rev=%d\n"
                    msg %= candidaterev
                    self._write_debug(msg)
                    candidate_type = None
                    if candidaterev == p1r:
                        candidate_type = b"p1"
                    elif candidaterev == p2r:
                        candidate_type = b"p2"
                    elif self.revlog.issnapshot(candidaterev):
                        candidate_type = b"snapshot-%d"
                        candidate_type %= self.revlog.snapshotdepth(
                            candidaterev
                        )

                    if candidate_type is not None:
                        msg = b"DBG-DELTAS-SEARCH: type=%s\n"
                        msg %= candidate_type
                        self._write_debug(msg)
                    msg = b"DBG-DELTAS-SEARCH: size=%d\n"
                    msg %= self.revlog.length(candidaterev)
                    self._write_debug(msg)
                    msg = b"DBG-DELTAS-SEARCH: base=%d\n"
                    msg %= self.revlog.deltaparent(candidaterev)
                    self._write_debug(msg)

                dbg_try_count += 1

                if self._debug_search:
                    delta_start = util.timer()
                candidatedelta = self._builddeltainfo(
                    revinfo,
                    candidaterev,
                    target_rev=target_rev,
                )
                if self._debug_search:
                    delta_end = util.timer()
                    msg = b"DBG-DELTAS-SEARCH: delta-search-time=%f\n"
                    msg %= delta_end - delta_start
                    self._write_debug(msg)
                if candidatedelta is not None:
                    if search.is_good_delta_info(candidatedelta):
                        if self._debug_search:
                            msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (GOOD)\n"
                            msg %= candidatedelta.deltalen
                            self._write_debug(msg)
                        nominateddeltas.append(candidatedelta)
                    elif self._debug_search:
                        msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (BAD)\n"
                        msg %= candidatedelta.deltalen
                        self._write_debug(msg)
                elif self._debug_search:
                    msg = b"DBG-DELTAS-SEARCH: NO-DELTA\n"
                    self._write_debug(msg)
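            # the smallest nominated delta wins this round and becomes the
            # contender for the next one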
            if nominateddeltas:
                deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
            search.next_group(deltainfo)

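        # fall back to a full snapshot when no suitable delta was found, then
        # classify the result for the debug output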
        if deltainfo is None:
            dbg_type = b"full"
            deltainfo = self._fullsnapshotinfo(revinfo, target_rev)
        elif deltainfo.snapshotdepth:  # pytype: disable=attribute-error
            dbg_type = b"snapshot"
        else:
            dbg_type = b"delta"

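        # a cached base is only reported as "used" when the search ended
        # immediately on it, without any extra refinement round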
        if gather_debug:
            end = util.timer()
            if dbg_type == b'full':
                used_cached = (
                    cachedelta is not None
                    and dbg_try_rounds == 0
                    and dbg_try_count == 0
                    and cachedelta[0] == nullrev
                )
            else:
                used_cached = (
                    cachedelta is not None
                    and dbg_try_rounds == 1
                    and dbg_try_count == 1
                    and deltainfo.base == cachedelta[0]
                )
            dbg['duration'] = end - start
            dbg[
                'delta-base'
            ] = deltainfo.base  # pytype: disable=attribute-error
            dbg['search_round_count'] = dbg_try_rounds
            dbg['using-cached-base'] = used_cached
            dbg['delta_try_count'] = dbg_try_count
            dbg['type'] = dbg_type
            if (
                deltainfo.snapshotdepth  # pytype: disable=attribute-error
                is not None
            ):
                dbg[
                    'snapshot-depth'
                ] = deltainfo.snapshotdepth  # pytype: disable=attribute-error
            else:
                dbg['snapshot-depth'] = -1
            self._dbg_process_data(dbg)
        return deltainfo

    def _one_dbg_data(self):
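        """return a fresh dict ready to hold the data of one delta search"""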
        dbg = {
            'duration': None,
            'revision': None,
            'delta-base': None,
            'search_round_count': None,
            'using-cached-base': None,
            'delta_try_count': None,
            'type': None,
            'p1-chain-len': None,
            'p2-chain-len': None,
            'snapshot-depth': None,
            'target-revlog': None,
        }
        target_revlog = b"UNKNOWN"
        target_type = self.revlog.target[0]
        target_key = self.revlog.target[1]
        if target_type == KIND_CHANGELOG:
            target_revlog = b'CHANGELOG:'
        elif target_type == KIND_MANIFESTLOG:
            target_revlog = b'MANIFESTLOG:'
            if target_key:
                target_revlog += b'%s:' % target_key
        elif target_type == KIND_FILELOG:
            target_revlog = b'FILELOG:'
            if target_key:
                target_revlog += b'%s:' % target_key
        dbg['target-revlog'] = target_revlog
        return dbg

    def _dbg_process_data(self, dbg):
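        """record gathered debug data, as structured entries and/or as text"""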
        if self._debug_info is not None:
            self._debug_info.append(dbg)

        if self._write_debug is not None:
            msg = (
                b"DBG-DELTAS:"
                b" %-12s"
                b" rev=%d:"
                b" delta-base=%d"
                b" is-cached=%d"
                b" - search-rounds=%d"
                b" try-count=%d"
                b" - delta-type=%-6s"
                b" snap-depth=%d"
                b" - p1-chain-length=%d"
                b" p2-chain-length=%d"
                b" - duration=%f"
                b"\n"
            )
            msg %= (
                dbg["target-revlog"],
                dbg["revision"],
                dbg["delta-base"],
                dbg["using-cached-base"],
                dbg["search_round_count"],
                dbg["delta_try_count"],
                dbg["type"],
                dbg["snapshot-depth"],
                dbg["p1-chain-len"],
                dbg["p2-chain-len"],
                dbg["duration"],
            )
            self._write_debug(msg)


def delta_compression(default_compression_header, deltainfo):
    """return (COMPRESSION_MODE, deltainfo)

    used by revlog v2+ formats to dispatch between PLAIN and DEFAULT
    compression.
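
    Example (an illustrative sketch: every ``_deltainfo`` field except
    ``data`` is a dummy value, and ``b'x'`` stands in for the default
    compression header):

    >>> di = _deltainfo(
    ...     distance=0, deltalen=12, data=(b'', b'x-compressed'), base=0,
    ...     chainbase=0, chainlen=1, compresseddeltalen=12,
    ...     snapshotdepth=None,
    ... )
    >>> mode, di = delta_compression(b'x', di)
    >>> mode == COMP_MODE_DEFAULT
    True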
    """
    h, d = deltainfo.data
    compression_mode = COMP_MODE_INLINE
    if not h and not d:
        # no data to store at all... declare it uncompressed
        compression_mode = COMP_MODE_PLAIN
    elif not h:
        t = d[0:1]
        if t == b'\0':
            compression_mode = COMP_MODE_PLAIN
        elif t == default_compression_header:
            compression_mode = COMP_MODE_DEFAULT
    elif h == b'u':
        # we have a more efficient way to declare uncompressed
        h = b''
        compression_mode = COMP_MODE_PLAIN
        deltainfo = drop_u_compression(deltainfo)
    return compression_mode, deltainfo