##// END OF EJS Templates
delta-find: add debug information about reuse of cached data...
marmoute -
r50504:78ba4187 default
parent child Browse files
Show More
@@ -1,1332 +1,1351 b''
1 1 # revlogdeltas.py - Logic around delta computation for revlog
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2018 Octobus <contact@octobus.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 """Helper class to compute deltas stored inside revlogs"""
9 9
10 10
11 11 import collections
12 12 import struct
13 13
14 14 # import stuff from node for others to import from revlog
15 15 from ..node import nullrev
16 16 from ..i18n import _
17 17 from ..pycompat import getattr
18 18
19 19 from .constants import (
20 20 COMP_MODE_DEFAULT,
21 21 COMP_MODE_INLINE,
22 22 COMP_MODE_PLAIN,
23 23 KIND_CHANGELOG,
24 24 KIND_FILELOG,
25 25 KIND_MANIFESTLOG,
26 26 REVIDX_ISCENSORED,
27 27 REVIDX_RAWTEXT_CHANGING_FLAGS,
28 28 )
29 29
30 30 from ..thirdparty import attr
31 31
32 32 from .. import (
33 33 error,
34 34 mdiff,
35 35 util,
36 36 )
37 37
38 38 from . import flagutil
39 39
40 40 # maximum <delta-chain-data>/<revision-text-length> ratio
41 41 LIMIT_DELTA2TEXT = 2
42 42
43 43
class _testrevlog:
    """Minimal stand-in for a revlog, for use in this module's doctests."""

    def __init__(self, data, density=0.5, mingap=0, snapshot=()):
        """`data` is a list of cumulative revision payload boundaries"""
        self._data = data
        self._srdensitythreshold = density
        self._srmingapsize = mingap
        self._snapshot = set(snapshot)
        self.index = None

    def start(self, rev):
        # both the null revision and revision 0 start at offset zero
        if rev in (nullrev, 0):
            return 0
        return self._data[rev - 1]

    def end(self, rev):
        return 0 if rev == nullrev else self._data[rev]

    def length(self, rev):
        return self.end(rev) - self.start(rev)

    def __len__(self):
        return len(self._data)

    def issnapshot(self, rev):
        # nullrev is always considered a snapshot
        return rev == nullrev or rev in self._snapshot
78 78
def slicechunk(revlog, revs, targetsize=None):
    """slice revs to reduce the amount of unrelated data to be read from disk.

    ``revs`` is sliced into groups that should be read in one time.
    Assume that revs are sorted.

    The initial chunk is sliced until the overall density (payload/chunks-span
    ratio) is above `revlog._srdensitythreshold`. No gap smaller than
    `revlog._srmingapsize` is skipped.

    If `targetsize` is set, no chunk larger than `targetsize` will be yield.
    For consistency with other slicing choice, this limit won't go lower than
    `revlog._srmingapsize`.

    If individual revisions chunk are larger than this limit, they will still
    be raised individually.

    >>> data = [
    ...  5,  #00 (5)
    ...  10, #01 (5)
    ...  12, #02 (2)
    ...  12, #03 (empty)
    ...  27, #04 (15)
    ...  31, #05 (4)
    ...  31, #06 (empty)
    ...  42, #07 (11)
    ...  47, #08 (5)
    ...  47, #09 (empty)
    ...  48, #10 (1)
    ...  51, #11 (3)
    ...  74, #12 (23)
    ...  85, #13 (11)
    ...  86, #14 (1)
    ...  91, #15 (5)
    ... ]
    >>> revlog = _testrevlog(data, snapshot=range(16))

    >>> list(slicechunk(revlog, list(range(16))))
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
    >>> list(slicechunk(revlog, [0, 15]))
    [[0], [15]]
    >>> list(slicechunk(revlog, [0, 11, 15]))
    [[0], [11], [15]]
    >>> list(slicechunk(revlog, [0, 11, 13, 15]))
    [[0], [11, 13, 15]]
    >>> list(slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
    [[1, 2], [5, 8, 10, 11], [14]]

    Slicing with a maximum chunk size
    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
    [[0], [11], [13], [15]]
    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
    [[0], [11], [13, 15]]

    Slicing involving nullrev
    >>> list(slicechunk(revlog, [-1, 0, 11, 13, 15], targetsize=20))
    [[-1, 0], [11], [13, 15]]
    >>> list(slicechunk(revlog, [-1, 13, 15], targetsize=5))
    [[-1], [13], [15]]
    """
    if targetsize is not None:
        targetsize = max(targetsize, revlog._srmingapsize)
    # targetsize should not be specified when evaluating delta candidates:
    # * targetsize is used to ensure we stay within specification when reading,
    # prefer the index's native density-slicing implementation when it
    # provides one; otherwise fall back to the pure-Python version below
    densityslicing = getattr(revlog.index, 'slicechunktodensity', None)
    if densityslicing is None:
        densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z)
    for chunk in densityslicing(
        revs, revlog._srdensitythreshold, revlog._srmingapsize
    ):
        # each density-selected chunk may still exceed targetsize; slice again
        for subchunk in _slicechunktosize(revlog, chunk, targetsize):
            yield subchunk
151 151
152 152
def _slicechunktosize(revlog, revs, targetsize=None):
    """slice revs to match the target size

    This is intended to be used on chunk that density slicing selected by that
    are still too large compared to the read garantee of revlog. This might
    happens when "minimal gap size" interrupted the slicing or when chain are
    built in a way that create large blocks next to each other.

    >>> data = [
    ...  3,  #0 (3)
    ...  5,  #1 (2)
    ...  6,  #2 (1)
    ...  8,  #3 (2)
    ...  8,  #4 (empty)
    ...  11, #5 (3)
    ...  12, #6 (1)
    ...  13, #7 (1)
    ...  14, #8 (1)
    ... ]

    == All snapshots cases ==
    >>> revlog = _testrevlog(data, snapshot=range(9))

    Cases where chunk is already small enough
    >>> list(_slicechunktosize(revlog, [0], 3))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], 3))
    [[6, 7]]
    >>> list(_slicechunktosize(revlog, [0], None))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], None))
    [[6, 7]]

    cases where we need actual slicing
    >>> list(_slicechunktosize(revlog, [0, 1], 3))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 3))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
    [[1, 2], [3]]
    >>> list(_slicechunktosize(revlog, [3, 5], 3))
    [[3], [5]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
    [[3], [5]]
    >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
    [[5], [6, 7, 8]]
    >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
    [[0], [1, 2], [3], [5], [6, 7, 8]]

    Case with too large individual chunk (must return valid chunk)
    >>> list(_slicechunktosize(revlog, [0, 1], 2))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 1))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
    [[3], [5]]

    == No Snapshot cases ==
    >>> revlog = _testrevlog(data)

    Cases where chunk is already small enough
    >>> list(_slicechunktosize(revlog, [0], 3))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], 3))
    [[6, 7]]
    >>> list(_slicechunktosize(revlog, [0], None))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], None))
    [[6, 7]]

    cases where we need actual slicing
    >>> list(_slicechunktosize(revlog, [0, 1], 3))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 3))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
    [[1], [2, 3]]
    >>> list(_slicechunktosize(revlog, [3, 5], 3))
    [[3], [5]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
    [[3], [4, 5]]
    >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
    [[5], [6, 7, 8]]
    >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
    [[0], [1, 2], [3], [5], [6, 7, 8]]

    Case with too large individual chunk (must return valid chunk)
    >>> list(_slicechunktosize(revlog, [0, 1], 2))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 1))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
    [[3], [5]]

    == mixed case ==
    >>> revlog = _testrevlog(data, snapshot=[0, 1, 2])
    >>> list(_slicechunktosize(revlog, list(range(9)), 5))
    [[0, 1], [2], [3, 4, 5], [6, 7, 8]]
    """
    assert targetsize is None or 0 <= targetsize
    startdata = revlog.start(revs[0])
    enddata = revlog.end(revs[-1])
    fullspan = enddata - startdata
    # fast path: the whole chunk already fits within the target size
    if targetsize is None or fullspan <= targetsize:
        yield revs
        return

    startrevidx = 0
    endrevidx = 1
    iterrevs = enumerate(revs)
    next(iterrevs)  # skip first rev.
    # first step: get snapshots out of the way
    for idx, r in iterrevs:
        span = revlog.end(r) - startdata
        snapshot = revlog.issnapshot(r)
        if span <= targetsize and snapshot:
            # still within budget and still in the snapshot prefix: extend
            endrevidx = idx + 1
        else:
            # flush the current chunk and restart one at this revision
            chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
            if chunk:
                yield chunk
            startrevidx = idx
            startdata = revlog.start(r)
            endrevidx = idx + 1
        if not snapshot:
            # past the snapshot prefix; the remainder is handled below
            break

    # for the others, we use binary slicing to quickly converge toward valid
    # chunks (otherwise, we might end up looking for start/end of many
    # revisions). This logic is not looking for the perfect slicing point, it
    # focuses on quickly converging toward valid chunks.
    nbitem = len(revs)
    while (enddata - startdata) > targetsize:
        endrevidx = nbitem
        if nbitem - startrevidx <= 1:
            break  # protect against individual chunk larger than limit
        localenddata = revlog.end(revs[endrevidx - 1])
        span = localenddata - startdata
        # halve the candidate chunk until its span fits the target size
        while span > targetsize:
            if endrevidx - startrevidx <= 1:
                break  # protect against individual chunk larger than limit
            endrevidx -= (endrevidx - startrevidx) // 2
            localenddata = revlog.end(revs[endrevidx - 1])
            span = localenddata - startdata
        chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
        if chunk:
            yield chunk
        startrevidx = endrevidx
        startdata = revlog.start(revs[startrevidx])

    # emit whatever remains after the last cut
    chunk = _trimchunk(revlog, revs, startrevidx)
    if chunk:
        yield chunk
306 306
307 307
def _slicechunktodensity(revlog, revs, targetdensity=0.5, mingapsize=0):
    """slice revs to reduce the amount of unrelated data to be read from disk.

    ``revs`` is sliced into groups that should be read in one time.
    Assume that revs are sorted.

    The initial chunk is sliced until the overall density (payload/chunks-span
    ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
    skipped.

    >>> revlog = _testrevlog([
    ...  5,  #00 (5)
    ...  10, #01 (5)
    ...  12, #02 (2)
    ...  12, #03 (empty)
    ...  27, #04 (15)
    ...  31, #05 (4)
    ...  31, #06 (empty)
    ...  42, #07 (11)
    ...  47, #08 (5)
    ...  47, #09 (empty)
    ...  48, #10 (1)
    ...  51, #11 (3)
    ...  74, #12 (23)
    ...  85, #13 (11)
    ...  86, #14 (1)
    ...  91, #15 (5)
    ... ])

    >>> list(_slicechunktodensity(revlog, list(range(16))))
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
    >>> list(_slicechunktodensity(revlog, [0, 15]))
    [[0], [15]]
    >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
    [[0], [11], [15]]
    >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
    [[0], [11, 13, 15]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
    [[1, 2], [5, 8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           mingapsize=20))
    [[1, 2, 3, 5, 8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           targetdensity=0.95))
    [[1, 2], [5], [8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           targetdensity=0.95, mingapsize=12))
    [[1, 2], [5, 8, 10, 11], [14]]
    """
    start = revlog.start
    length = revlog.length

    if len(revs) <= 1:
        yield revs
        return

    deltachainspan = segmentspan(revlog, revs)

    # a span smaller than the minimum gap size is never worth splitting
    if deltachainspan < mingapsize:
        yield revs
        return

    readdata = deltachainspan
    chainpayload = sum(length(r) for r in revs)

    if deltachainspan:
        density = chainpayload / float(deltachainspan)
    else:
        density = 1.0

    if density >= targetdensity:
        yield revs
        return

    # Collect the (size, index) of every gap between consecutive revisions
    # so the largest ones can be removed first.
    gaps = []
    prevend = None
    for i, rev in enumerate(revs):
        revstart = start(rev)
        revlen = length(rev)

        # Skip empty revisions to form larger holes
        if revlen == 0:
            continue

        if prevend is not None:
            gapsize = revstart - prevend
            # only consider holes that are large enough
            if gapsize > mingapsize:
                gaps.append((gapsize, i))

        prevend = revstart + revlen
    # sort the gaps to pop them from largest to small
    gaps.sort()

    # Collect the indices of the largest holes until the density is acceptable
    selected = []
    while gaps and density < targetdensity:
        gapsize, gapidx = gaps.pop()

        selected.append(gapidx)

        # removing the gap from the read span improves the density of what is
        # left, so recompute it
        readdata -= gapsize
        if readdata > 0:
            density = chainpayload / float(readdata)
        else:
            density = 1.0
    selected.sort()

    # Cut the revs at collected indices
    previdx = 0
    for idx in selected:

        chunk = _trimchunk(revlog, revs, previdx, idx)
        if chunk:
            yield chunk

        previdx = idx

    chunk = _trimchunk(revlog, revs, previdx)
    if chunk:
        yield chunk
432 432
433 433
434 434 def _trimchunk(revlog, revs, startidx, endidx=None):
435 435 """returns revs[startidx:endidx] without empty trailing revs
436 436
437 437 Doctest Setup
438 438 >>> revlog = _testrevlog([
439 439 ... 5, #0
440 440 ... 10, #1
441 441 ... 12, #2
442 442 ... 12, #3 (empty)
443 443 ... 17, #4
444 444 ... 21, #5
445 445 ... 21, #6 (empty)
446 446 ... ])
447 447
448 448 Contiguous cases:
449 449 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
450 450 [0, 1, 2, 3, 4, 5]
451 451 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
452 452 [0, 1, 2, 3, 4]
453 453 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
454 454 [0, 1, 2]
455 455 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
456 456 [2]
457 457 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
458 458 [3, 4, 5]
459 459 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
460 460 [3, 4]
461 461
462 462 Discontiguous cases:
463 463 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
464 464 [1, 3, 5]
465 465 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
466 466 [1]
467 467 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
468 468 [3, 5]
469 469 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
470 470 [3, 5]
471 471 """
472 472 length = revlog.length
473 473
474 474 if endidx is None:
475 475 endidx = len(revs)
476 476
477 477 # If we have a non-emtpy delta candidate, there are nothing to trim
478 478 if revs[endidx - 1] < len(revlog):
479 479 # Trim empty revs at the end, except the very first revision of a chain
480 480 while (
481 481 endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0
482 482 ):
483 483 endidx -= 1
484 484
485 485 return revs[startidx:endidx]
486 486
487 487
def segmentspan(revlog, revs):
    """Get the byte span of a segment of revisions

    revs is a sorted array of revision numbers

    >>> revlog = _testrevlog([
    ...  5,  #0
    ...  10, #1
    ...  12, #2
    ...  12, #3 (empty)
    ...  17, #4
    ... ])

    >>> segmentspan(revlog, [0, 1, 2, 3, 4])
    17
    >>> segmentspan(revlog, [0, 4])
    17
    >>> segmentspan(revlog, [3, 4])
    5
    >>> segmentspan(revlog, [1, 2, 3,])
    7
    >>> segmentspan(revlog, [1, 3])
    7
    """
    # an empty segment occupies no bytes
    if not revs:
        return 0
    first, last = revs[0], revs[-1]
    # only the two endpoints matter; gaps in between are counted as read
    return revlog.end(last) - revlog.start(first)
516 516
517 517
def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
    """build full text from a (base, delta) pair and other metadata

    fh: file handle to read the base revision text from, when needed
    revlog: the revlog the revision belongs to
    baserev: revision number the delta applies against
    delta: the binary delta
    p1, p2: parent nodes, used for hash validation
    flags: storage flags of the new revision
    expectednode: node the resulting text must hash to

    Raises StorageError when REVIDX_ISCENSORED is set but the revision is
    not actually censored; re-raises CensoredNodeError when a censored base
    is decoded without the censored flag being passed.
    """
    # special case deltas which replace entire base; no need to decode
    # base revision. this neatly avoids censored bases, which throw when
    # they're decoded.
    hlen = struct.calcsize(b">lll")
    if delta[:hlen] == mdiff.replacediffheader(
        revlog.rawsize(baserev), len(delta) - hlen
    ):
        fulltext = delta[hlen:]
    else:
        # deltabase is rawtext before changed by flag processors, which is
        # equivalent to non-raw text
        basetext = revlog.revision(baserev, _df=fh)
        fulltext = mdiff.patch(basetext, delta)

    try:
        validatehash = flagutil.processflagsraw(revlog, fulltext, flags)
        if validatehash:
            revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
        if flags & REVIDX_ISCENSORED:
            raise error.StorageError(
                _(b'node %s is not censored') % expectednode
            )
    except error.CensoredNodeError:
        # must pass the censored index flag to add censored revisions
        if not flags & REVIDX_ISCENSORED:
            raise
    return fulltext
547 547
548 548
@attr.s(slots=True, frozen=True)
class _deltainfo:
    # Immutable record describing one candidate delta.
    distance = attr.ib()  # distance from the base revision (bounds read I/O)
    deltalen = attr.ib()  # size of the delta itself
    data = attr.ib()  # (header, payload) pair of delta bytes
    base = attr.ib()  # revision number of the delta base
    chainbase = attr.ib()  # revision at the root of the delta chain
    chainlen = attr.ib()  # number of deltas in the chain
    compresseddeltalen = attr.ib()  # cumulated compressed size of the chain
    snapshotdepth = attr.ib()  # snapshot depth, or None if not a snapshot
559 559
560 560
def drop_u_compression(delta):
    """turn a "u" (no-compression) delta into no-compression without header

    This is useful for revlog format that has better compression method.
    """
    header, payload = delta.data
    assert header == b'u', header
    # drop the one-byte b'u' header and shrink the delta length accordingly
    return _deltainfo(
        distance=delta.distance,
        deltalen=delta.deltalen - 1,
        data=(b'', payload),
        base=delta.base,
        chainbase=delta.chainbase,
        chainlen=delta.chainlen,
        compresseddeltalen=delta.compresseddeltalen,
        snapshotdepth=delta.snapshotdepth,
    )
577 577
578 578
def isgooddeltainfo(revlog, deltainfo, revinfo):
    """Returns True if the given delta is good. Good means that it is within
    the disk span, disk size, and chain length bounds that we know to be
    performant."""
    if deltainfo is None:
        return False

    # - 'deltainfo.distance' is the distance from the base revision --
    #   bounding it limits the amount of I/O we need to do.
    # - 'deltainfo.compresseddeltalen' is the sum of the total size of
    #   deltas we need to apply -- bounding it limits the amount of CPU
    #   we consume.

    textlen = revinfo.textlen
    # when no chain-span limit is configured, use the distance itself so the
    # span check below cannot fail; never go below 4x the text length
    maxdist = revlog._maxdeltachainspan or deltainfo.distance
    maxdist = max(maxdist, textlen * 4)

    # Bad delta from read span:
    #
    # If the span of data read is larger than the maximum allowed.
    #
    # In the sparse-revlog case, we rely on the associated "sparse reading"
    # to avoid issue related to the span of data. In theory, it would be
    # possible to build pathological revlog where delta pattern would lead
    # to too many reads. However, they do not happen in practice at all. So
    # we skip the span check entirely.
    if not revlog._sparserevlog and deltainfo.distance > maxdist:
        return False

    # Bad delta from new delta size:
    #
    # If the delta size is larger than the target text, storing the
    # delta will be inefficient.
    if deltainfo.deltalen > textlen:
        return False

    # Bad delta from cumulated payload size:
    #
    # If the sum of delta get larger than K * target text length.
    if deltainfo.compresseddeltalen > textlen * LIMIT_DELTA2TEXT:
        return False

    # Bad delta from chain length:
    #
    # If the number of delta in the chain gets too high.
    if revlog._maxchainlen and deltainfo.chainlen > revlog._maxchainlen:
        return False

    # bad delta from intermediate snapshot size limit
    #
    # If an intermediate snapshot size is higher than the limit.  The
    # limit exist to prevent endless chain of intermediate delta to be
    # created.
    depth = deltainfo.snapshotdepth
    if depth is not None and (textlen >> depth) < deltainfo.deltalen:
        return False

    # bad delta if new intermediate snapshot is larger than the previous
    # snapshot
    if depth and revlog.length(deltainfo.base) < deltainfo.deltalen:
        return False

    return True
650 650
651 651
652 652 # If a revision's full text is that much bigger than a base candidate full
653 653 # text's, it is very unlikely that it will produce a valid delta. We no longer
654 654 # consider these candidates.
655 655 LIMIT_BASE2TEXT = 500
656 656
657 657
def _candidategroups(revlog, textlen, p1, p2, cachedelta):
    """Provides group of revision to be tested as delta base

    This top level function focus on emitting groups with unique and worthwhile
    content. See _raw_candidate_groups for details about the group order.

    This is a generator driven through ``send()``: the caller sends back the
    chosen "good" base (or None) after testing each yielded group, which is
    forwarded to the underlying _refinedgroups generator.
    """
    # should we try to build a delta?
    if not (len(revlog) and revlog._storedeltachains):
        yield None
        return

    deltalength = revlog.length
    deltaparent = revlog.deltaparent
    sparse = revlog._sparserevlog
    good = None

    deltas_limit = textlen * LIMIT_DELTA2TEXT

    # nullrev is pre-marked as tested: deltaing against it is the last resort
    tested = {nullrev}
    candidates = _refinedgroups(revlog, p1, p2, cachedelta)
    while True:
        temptative = candidates.send(good)
        if temptative is None:
            break
        group = []
        for rev in temptative:
            # skip over empty delta (no need to include them in a chain)
            while revlog._generaldelta and not (
                rev == nullrev or rev in tested or deltalength(rev)
            ):
                tested.add(rev)
                rev = deltaparent(rev)
            # no need to try a delta against nullrev, this will be done as a
            # last resort.
            if rev == nullrev:
                continue
            # filter out revision we tested already
            if rev in tested:
                continue
            tested.add(rev)
            # filter out delta base that will never produce good delta
            if deltas_limit < revlog.length(rev):
                continue
            if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
                continue
            # no delta for rawtext-changing revs (see "candelta" for why)
            if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
                continue
            # If we reach here, we are about to build and test a delta.
            # The delta building process will compute the chaininfo in all
            # case, since that computation is cached, it is fine to access it
            # here too.
            chainlen, chainsize = revlog._chaininfo(rev)
            # if chain will be too long, skip base
            if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
                continue
            # if chain already have too much data, skip base
            if deltas_limit < chainsize:
                continue
            if sparse and revlog.upperboundcomp is not None:
                maxcomp = revlog.upperboundcomp
                basenotsnap = (p1, p2, nullrev)
                if rev not in basenotsnap and revlog.issnapshot(rev):
                    snapshotdepth = revlog.snapshotdepth(rev)
                    # If text is significantly larger than the base, we can
                    # expect the resulting delta to be proportional to the size
                    # difference
                    revsize = revlog.rawsize(rev)
                    rawsizedistance = max(textlen - revsize, 0)
                    # use an estimate of the compression upper bound.
                    lowestrealisticdeltalen = rawsizedistance // maxcomp

                    # check the absolute constraint on the delta size
                    snapshotlimit = textlen >> snapshotdepth
                    if snapshotlimit < lowestrealisticdeltalen:
                        # delta lower bound is larger than accepted upper bound
                        continue

                    # check the relative constraint on the delta size
                    revlength = revlog.length(rev)
                    if revlength < lowestrealisticdeltalen:
                        # delta probable lower bound is larger than target base
                        continue

            group.append(rev)
        if group:
            # XXX: in the sparse revlog case, group can become large,
            # impacting performance. Some bounding or slicing mechanism
            # would help to reduce this impact.
            good = yield tuple(group)
    yield None
749 749
750 750
def _findsnapshots(revlog, cache, start_rev):
    """find snapshot from start_rev to tip"""
    if util.safehasattr(revlog.index, b'findsnapshots'):
        # the index implementation provides a native fast path
        revlog.index.findsnapshots(cache, start_rev)
        return
    # pure-Python fallback: scan every revision from start_rev to tip and
    # record each snapshot under its delta parent
    issnapshot = revlog.issnapshot
    deltaparent = revlog.deltaparent
    for rev in revlog.revs(start_rev):
        if issnapshot(rev):
            cache[deltaparent(rev)].append(rev)
761 761
762 762
def _refinedgroups(revlog, p1, p2, cachedelta):
    """Yield groups of candidate delta bases, refining around a found base.

    This is a generator driven through ``send()``: the caller sends back the
    base it found "good" in the previous group (or None), and the generator
    uses that answer to decide what to try next.
    """
    good = None
    # First we try to reuse the delta contained in the bundle.
    # (or from the source revlog)
    #
    # This logic only applies to general delta repositories and can be disabled
    # through configuration. Disabling reuse of a source delta is useful when
    # we want to make sure we recomputed "optimal" deltas.
    if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
        # Assume what we received from the server is a good choice
        # build delta will reuse the cache
        good = yield (cachedelta[0],)
        if good is not None:
            yield None
            return
    snapshots = collections.defaultdict(list)
    for candidates in _rawgroups(revlog, p1, p2, cachedelta, snapshots):
        good = yield candidates
        if good is not None:
            break

    # If sparse revlog is enabled, we can try to refine the available deltas
    if not revlog._sparserevlog:
        yield None
        return

    # if we have a refinable value, try to refine it
    if good is not None and good not in (p1, p2) and revlog.issnapshot(good):
        # refine snapshot down: walk toward the root of the snapshot chain as
        # long as the caller keeps accepting the proposed base
        previous = None
        while previous != good:
            previous = good
            base = revlog.deltaparent(good)
            if base == nullrev:
                break
            good = yield (base,)
        # refine snapshot up: try the snapshot children of the accepted base
        if not snapshots:
            _findsnapshots(revlog, snapshots, good + 1)
        previous = None
        while good != previous:
            previous = good
            children = tuple(sorted(c for c in snapshots[good]))
            good = yield children

    # we have found nothing
    yield None
810 810
811 811
812 812 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
813 813 """Provides group of revision to be tested as delta base
814 814
815 815 This lower level function focus on emitting delta theorically interresting
816 816 without looking it any practical details.
817 817
818 818 The group order aims at providing fast or small candidates first.
819 819 """
820 820 gdelta = revlog._generaldelta
821 821 # gate sparse behind general-delta because of issue6056
822 822 sparse = gdelta and revlog._sparserevlog
823 823 curr = len(revlog)
824 824 prev = curr - 1
825 825 deltachain = lambda rev: revlog._deltachain(rev)[0]
826 826
827 827 if gdelta:
828 828 # exclude already lazy tested base if any
829 829 parents = [p for p in (p1, p2) if p != nullrev]
830 830
831 831 if not revlog._deltabothparents and len(parents) == 2:
832 832 parents.sort()
833 833 # To minimize the chance of having to build a fulltext,
834 834 # pick first whichever parent is closest to us (max rev)
835 835 yield (parents[1],)
836 836 # then the other one (min rev) if the first did not fit
837 837 yield (parents[0],)
838 838 elif len(parents) > 0:
839 839 # Test all parents (1 or 2), and keep the best candidate
840 840 yield parents
841 841
842 842 if sparse and parents:
843 843 if snapshots is None:
844 844 # map: base-rev: snapshot-rev
845 845 snapshots = collections.defaultdict(list)
846 846 # See if we can use an existing snapshot in the parent chains to use as
847 847 # a base for a new intermediate-snapshot
848 848 #
849 849 # search for snapshot in parents delta chain
850 850 # map: snapshot-level: snapshot-rev
851 851 parents_snaps = collections.defaultdict(set)
852 852 candidate_chains = [deltachain(p) for p in parents]
853 853 for chain in candidate_chains:
854 854 for idx, s in enumerate(chain):
855 855 if not revlog.issnapshot(s):
856 856 break
857 857 parents_snaps[idx].add(s)
858 858 snapfloor = min(parents_snaps[0]) + 1
859 859 _findsnapshots(revlog, snapshots, snapfloor)
860 860 # search for the highest "unrelated" revision
861 861 #
862 862 # Adding snapshots used by "unrelated" revision increase the odd we
863 863 # reuse an independant, yet better snapshot chain.
864 864 #
865 865 # XXX instead of building a set of revisions, we could lazily enumerate
866 866 # over the chains. That would be more efficient, however we stick to
867 867 # simple code for now.
868 868 all_revs = set()
869 869 for chain in candidate_chains:
870 870 all_revs.update(chain)
871 871 other = None
872 872 for r in revlog.revs(prev, snapfloor):
873 873 if r not in all_revs:
874 874 other = r
875 875 break
876 876 if other is not None:
877 877 # To avoid unfair competition, we won't use unrelated intermediate
878 878 # snapshot that are deeper than the ones from the parent delta
879 879 # chain.
880 880 max_depth = max(parents_snaps.keys())
881 881 chain = deltachain(other)
882 882 for idx, s in enumerate(chain):
883 883 if s < snapfloor:
884 884 continue
885 885 if max_depth < idx:
886 886 break
887 887 if not revlog.issnapshot(s):
888 888 break
889 889 parents_snaps[idx].add(s)
890 890 # Test them as possible intermediate snapshot base
891 891 # We test them from highest to lowest level. High level one are more
892 892 # likely to result in small delta
893 893 floor = None
894 894 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
895 895 siblings = set()
896 896 for s in snaps:
897 897 siblings.update(snapshots[s])
898 898 # Before considering making a new intermediate snapshot, we check
899 899 # if an existing snapshot, children of base we consider, would be
900 900 # suitable.
901 901 #
902 902 # It gives a chance to reuse a delta chain "unrelated" to the
903 903 # current revision instead of starting our own. Without such
904 904 # re-use, topological branches would keep reopening new chains,
905 905 # creating more and more snapshots as the repository grows.
906 906
907 907 if floor is not None:
908 908 # We only do this for siblings created after the one in our
909 909 # parent's delta chain. Those created before have fewer chances
910 910 # to be a valid base since our ancestors had to create a new
911 911 # snapshot.
912 912 siblings = [r for r in siblings if floor < r]
913 913 yield tuple(sorted(siblings))
914 914 # then test the base from our parent's delta chain.
915 915 yield tuple(sorted(snaps))
916 916 floor = min(snaps)
917 917 # No suitable base found in the parent chain, search if any full
918 918 # snapshots emitted since parent's base would be a suitable base for an
919 919 # intermediate snapshot.
920 920 #
921 921 # It gives a chance to reuse a delta chain unrelated to the current
922 922 # revisions instead of starting our own. Without such re-use,
923 923 # topological branches would keep reopening new full chains, creating
924 924 # more and more snapshots as the repository grows.
925 925 yield tuple(snapshots[nullrev])
926 926
927 927 if not sparse:
928 928 # other approach failed try against prev to hopefully save us a
929 929 # fulltext.
930 930 yield (prev,)
931 931
932 932
933 933 class deltacomputer:
934 934 def __init__(self, revlog, write_debug=None, debug_search=False):
935 935 self.revlog = revlog
936 936 self._write_debug = write_debug
937 937 self._debug_search = debug_search
938 938
939 939 def buildtext(self, revinfo, fh):
940 940 """Builds a fulltext version of a revision
941 941
942 942 revinfo: revisioninfo instance that contains all needed info
943 943 fh: file handle to either the .i or the .d revlog file,
944 944 depending on whether it is inlined or not
945 945 """
946 946 btext = revinfo.btext
947 947 if btext[0] is not None:
948 948 return btext[0]
949 949
950 950 revlog = self.revlog
951 951 cachedelta = revinfo.cachedelta
952 952 baserev = cachedelta[0]
953 953 delta = cachedelta[1]
954 954
955 955 fulltext = btext[0] = _textfromdelta(
956 956 fh,
957 957 revlog,
958 958 baserev,
959 959 delta,
960 960 revinfo.p1,
961 961 revinfo.p2,
962 962 revinfo.flags,
963 963 revinfo.node,
964 964 )
965 965 return fulltext
966 966
967 967 def _builddeltadiff(self, base, revinfo, fh):
968 968 revlog = self.revlog
969 969 t = self.buildtext(revinfo, fh)
970 970 if revlog.iscensored(base):
971 971 # deltas based on a censored revision must replace the
972 972 # full content in one patch, so delta works everywhere
973 973 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
974 974 delta = header + t
975 975 else:
976 976 ptext = revlog.rawdata(base, _df=fh)
977 977 delta = mdiff.textdiff(ptext, t)
978 978
979 979 return delta
980 980
981 981 def _builddeltainfo(self, revinfo, base, fh):
982 982 # can we use the cached delta?
983 983 revlog = self.revlog
984 984 debug_search = self._write_debug is not None and self._debug_search
985 985 chainbase = revlog.chainbase(base)
986 986 if revlog._generaldelta:
987 987 deltabase = base
988 988 else:
989 989 deltabase = chainbase
990 990 snapshotdepth = None
991 991 if revlog._sparserevlog and deltabase == nullrev:
992 992 snapshotdepth = 0
993 993 elif revlog._sparserevlog and revlog.issnapshot(deltabase):
994 994 # A delta chain should always be one full snapshot,
995 995 # zero or more semi-snapshots, and zero or more deltas
996 996 p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
997 997 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
998 998 snapshotdepth = len(revlog._deltachain(deltabase)[0])
999 999 delta = None
1000 1000 if revinfo.cachedelta:
1001 1001 cachebase, cachediff = revinfo.cachedelta
1002 1002 # check if the diff still applies
1003 1003 currentbase = cachebase
1004 1004 while (
1005 1005 currentbase != nullrev
1006 1006 and currentbase != base
1007 1007 and self.revlog.length(currentbase) == 0
1008 1008 ):
1009 1009 currentbase = self.revlog.deltaparent(currentbase)
1010 1010 if self.revlog._lazydelta and currentbase == base:
1011 1011 delta = revinfo.cachedelta[1]
1012 1012 if delta is None:
1013 1013 delta = self._builddeltadiff(base, revinfo, fh)
1014 1014 if debug_search:
1015 1015 msg = b"DBG-DELTAS-SEARCH: uncompressed-delta-size=%d\n"
1016 1016 msg %= len(delta)
1017 1017 self._write_debug(msg)
1018 1018 # snapshotdepth needs to be neither None nor a 0-level snapshot
1019 1019 if revlog.upperboundcomp is not None and snapshotdepth:
1020 1020 lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp
1021 1021 snapshotlimit = revinfo.textlen >> snapshotdepth
1022 1022 if debug_search:
1023 1023 msg = b"DBG-DELTAS-SEARCH: projected-lower-size=%d\n"
1024 1024 msg %= lowestrealisticdeltalen
1025 1025 self._write_debug(msg)
1026 1026 if snapshotlimit < lowestrealisticdeltalen:
1027 1027 if debug_search:
1028 1028 msg = b"DBG-DELTAS-SEARCH: DISCARDED (snapshot limit)\n"
1029 1029 self._write_debug(msg)
1030 1030 return None
1031 1031 if revlog.length(base) < lowestrealisticdeltalen:
1032 1032 if debug_search:
1033 1033 msg = b"DBG-DELTAS-SEARCH: DISCARDED (prev size)\n"
1034 1034 self._write_debug(msg)
1035 1035 return None
1036 1036 header, data = revlog.compress(delta)
1037 1037 deltalen = len(header) + len(data)
1038 1038 offset = revlog.end(len(revlog) - 1)
1039 1039 dist = deltalen + offset - revlog.start(chainbase)
1040 1040 chainlen, compresseddeltalen = revlog._chaininfo(base)
1041 1041 chainlen += 1
1042 1042 compresseddeltalen += deltalen
1043 1043
1044 1044 return _deltainfo(
1045 1045 dist,
1046 1046 deltalen,
1047 1047 (header, data),
1048 1048 deltabase,
1049 1049 chainbase,
1050 1050 chainlen,
1051 1051 compresseddeltalen,
1052 1052 snapshotdepth,
1053 1053 )
1054 1054
1055 1055 def _fullsnapshotinfo(self, fh, revinfo, curr):
1056 1056 rawtext = self.buildtext(revinfo, fh)
1057 1057 data = self.revlog.compress(rawtext)
1058 1058 compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
1059 1059 deltabase = chainbase = curr
1060 1060 snapshotdepth = 0
1061 1061 chainlen = 1
1062 1062
1063 1063 return _deltainfo(
1064 1064 dist,
1065 1065 deltalen,
1066 1066 data,
1067 1067 deltabase,
1068 1068 chainbase,
1069 1069 chainlen,
1070 1070 compresseddeltalen,
1071 1071 snapshotdepth,
1072 1072 )
1073 1073
1074 1074 def finddeltainfo(self, revinfo, fh, excluded_bases=None, target_rev=None):
1075 1075 """Find an acceptable delta against a candidate revision
1076 1076
1077 1077 revinfo: information about the revision (instance of _revisioninfo)
1078 1078 fh: file handle to either the .i or the .d revlog file,
1079 1079 depending on whether it is inlined or not
1080 1080
1081 1081 Returns the first acceptable candidate revision, as ordered by
1082 1082 _candidategroups
1083 1083
1084 1084 If no suitable deltabase is found, we return delta info for a full
1085 1085 snapshot.
1086 1086
1087 1087 `excluded_bases` is an optional set of revision that cannot be used as
1088 1088 a delta base. Use this to recompute delta suitable in censor or strip
1089 1089 context.
1090 1090 """
1091 1091 if target_rev is None:
1092 1092 target_rev = len(self.revlog)
1093 1093
1094 1094 if not revinfo.textlen:
1095 1095 return self._fullsnapshotinfo(fh, revinfo, target_rev)
1096 1096
1097 1097 if excluded_bases is None:
1098 1098 excluded_bases = set()
1099 1099
1100 1100 # no delta for flag processor revision (see "candelta" for why)
1101 1101 # not calling candelta since only one revision needs test, also to
1102 1102 # avoid overhead fetching flags again.
1103 1103 if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
1104 1104 return self._fullsnapshotinfo(fh, revinfo, target_rev)
1105 1105
1106 1106 if self._write_debug is not None:
1107 1107 start = util.timer()
1108 1108
1109 1109 debug_search = self._write_debug is not None and self._debug_search
1110 1110
1111 1111 # count the number of different delta we tried (for debug purpose)
1112 1112 dbg_try_count = 0
1113 1113 # count the number of "search round" we did. (for debug purpose)
1114 1114 dbg_try_rounds = 0
1115 1115 dbg_type = b'unknown'
1116 1116
1117 1117 cachedelta = revinfo.cachedelta
1118 1118 p1 = revinfo.p1
1119 1119 p2 = revinfo.p2
1120 1120 revlog = self.revlog
1121 1121
1122 1122 deltainfo = None
1123 1123 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
1124 1124
1125 1125 if self._write_debug is not None:
1126 1126 if p1r != nullrev:
1127 1127 p1_chain_len = revlog._chaininfo(p1r)[0]
1128 1128 else:
1129 1129 p1_chain_len = -1
1130 1130 if p2r != nullrev:
1131 1131 p2_chain_len = revlog._chaininfo(p2r)[0]
1132 1132 else:
1133 1133 p2_chain_len = -1
1134 1134 if debug_search:
1135 1135 msg = b"DBG-DELTAS-SEARCH: SEARCH rev=%d\n"
1136 1136 msg %= target_rev
1137 1137 self._write_debug(msg)
1138 1138
1139 1139 groups = _candidategroups(
1140 1140 self.revlog, revinfo.textlen, p1r, p2r, cachedelta
1141 1141 )
1142 1142 candidaterevs = next(groups)
1143 1143 while candidaterevs is not None:
1144 1144 dbg_try_rounds += 1
1145 1145 if debug_search:
1146 1146 prev = None
1147 1147 if deltainfo is not None:
1148 1148 prev = deltainfo.base
1149 1149
1150 if p1 in candidaterevs or p2 in candidaterevs:
1150 if (
1151 cachedelta is not None
1152 and len(candidaterevs) == 1
1153 and cachedelta[0] in candidaterevs
1154 ):
1155 round_type = b"cached-delta"
1156 elif p1 in candidaterevs or p2 in candidaterevs:
1151 1157 round_type = b"parents"
1152 1158 elif prev is not None and all(c < prev for c in candidaterevs):
1153 1159 round_type = b"refine-down"
1154 1160 elif prev is not None and all(c > prev for c in candidaterevs):
1155 1161 round_type = b"refine-up"
1156 1162 else:
1157 1163 round_type = b"search-down"
1158 1164 msg = b"DBG-DELTAS-SEARCH: ROUND #%d - %d candidates - %s\n"
1159 1165 msg %= (dbg_try_rounds, len(candidaterevs), round_type)
1160 1166 self._write_debug(msg)
1161 1167 nominateddeltas = []
1162 1168 if deltainfo is not None:
1163 1169 if debug_search:
1164 1170 msg = (
1165 1171 b"DBG-DELTAS-SEARCH: CONTENDER: rev=%d - length=%d\n"
1166 1172 )
1167 1173 msg %= (deltainfo.base, deltainfo.deltalen)
1168 1174 self._write_debug(msg)
1169 1175 # if we already found a good delta,
1170 1176 # challenge it against refined candidates
1171 1177 nominateddeltas.append(deltainfo)
1172 1178 for candidaterev in candidaterevs:
1173 1179 if debug_search:
1174 1180 msg = b"DBG-DELTAS-SEARCH: CANDIDATE: rev=%d\n"
1175 1181 msg %= candidaterev
1176 1182 self._write_debug(msg)
1177 1183 candidate_type = None
1178 1184 if candidaterev == p1:
1179 1185 candidate_type = b"p1"
1180 1186 elif candidaterev == p2:
1181 1187 candidate_type = b"p2"
1182 1188 elif self.revlog.issnapshot(candidaterev):
1183 1189 candidate_type = b"snapshot-%d"
1184 1190 candidate_type %= self.revlog.snapshotdepth(
1185 1191 candidaterev
1186 1192 )
1187 1193
1188 1194 if candidate_type is not None:
1189 1195 msg = b"DBG-DELTAS-SEARCH: type=%s\n"
1190 1196 msg %= candidate_type
1191 1197 self._write_debug(msg)
1192 1198 msg = b"DBG-DELTAS-SEARCH: size=%d\n"
1193 1199 msg %= self.revlog.length(candidaterev)
1194 1200 self._write_debug(msg)
1195 1201 msg = b"DBG-DELTAS-SEARCH: base=%d\n"
1196 1202 msg %= self.revlog.deltaparent(candidaterev)
1197 1203 self._write_debug(msg)
1198 1204 if candidaterev in excluded_bases:
1199 1205 if debug_search:
1200 1206 msg = b"DBG-DELTAS-SEARCH: EXCLUDED\n"
1201 1207 self._write_debug(msg)
1202 1208 continue
1203 1209 if candidaterev >= target_rev:
1204 1210 if debug_search:
1205 1211 msg = b"DBG-DELTAS-SEARCH: TOO-HIGH\n"
1206 1212 self._write_debug(msg)
1207 1213 continue
1208 1214 dbg_try_count += 1
1209 1215
1210 1216 if debug_search:
1211 1217 delta_start = util.timer()
1212 1218 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
1213 1219 if debug_search:
1214 1220 delta_end = util.timer()
1215 1221 msg = b"DBG-DELTAS-SEARCH: delta-search-time=%f\n"
1216 1222 msg %= delta_end - delta_start
1217 1223 self._write_debug(msg)
1218 1224 if candidatedelta is not None:
1219 1225 if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
1220 1226 if debug_search:
1221 1227 msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (GOOD)\n"
1222 1228 msg %= candidatedelta.deltalen
1223 1229 self._write_debug(msg)
1224 1230 nominateddeltas.append(candidatedelta)
1225 1231 elif debug_search:
1226 1232 msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (BAD)\n"
1227 1233 msg %= candidatedelta.deltalen
1228 1234 self._write_debug(msg)
1229 1235 elif debug_search:
1230 1236 msg = b"DBG-DELTAS-SEARCH: NO-DELTA\n"
1231 1237 self._write_debug(msg)
1232 1238 if nominateddeltas:
1233 1239 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
1234 1240 if deltainfo is not None:
1235 1241 candidaterevs = groups.send(deltainfo.base)
1236 1242 else:
1237 1243 candidaterevs = next(groups)
1238 1244
1239 1245 if deltainfo is None:
1240 1246 dbg_type = b"full"
1241 1247 deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
1242 1248 elif deltainfo.snapshotdepth: # pytype: disable=attribute-error
1243 1249 dbg_type = b"snapshot"
1244 1250 else:
1245 1251 dbg_type = b"delta"
1246 1252
1247 1253 if self._write_debug is not None:
1248 1254 end = util.timer()
1255 assert deltainfo is not None # please pytype
1256 used_cached = (
1257 cachedelta is not None
1258 and dbg_try_rounds == 1
1259 and dbg_try_count == 1
1260 and deltainfo.base == cachedelta[0]
1261 )
1249 1262 dbg = {
1250 1263 'duration': end - start,
1251 1264 'revision': target_rev,
1265 'delta-base': deltainfo.base,
1252 1266 'search_round_count': dbg_try_rounds,
1267 'using-cached-base': used_cached,
1253 1268 'delta_try_count': dbg_try_count,
1254 1269 'type': dbg_type,
1255 1270 'p1-chain-len': p1_chain_len,
1256 1271 'p2-chain-len': p2_chain_len,
1257 1272 }
1258 1273 if (
1259 1274 deltainfo.snapshotdepth # pytype: disable=attribute-error
1260 1275 is not None
1261 1276 ):
1262 1277 dbg[
1263 1278 'snapshot-depth'
1264 1279 ] = deltainfo.snapshotdepth # pytype: disable=attribute-error
1265 1280 else:
1266 1281 dbg['snapshot-depth'] = 0
1267 1282 target_revlog = b"UNKNOWN"
1268 1283 target_type = self.revlog.target[0]
1269 1284 target_key = self.revlog.target[1]
1270 1285 if target_type == KIND_CHANGELOG:
1271 1286 target_revlog = b'CHANGELOG:'
1272 1287 elif target_type == KIND_MANIFESTLOG:
1273 1288 target_revlog = b'MANIFESTLOG:'
1274 1289 if target_key:
1275 1290 target_revlog += b'%s:' % target_key
1276 1291 elif target_type == KIND_FILELOG:
1277 1292 target_revlog = b'FILELOG:'
1278 1293 if target_key:
1279 1294 target_revlog += b'%s:' % target_key
1280 1295 dbg['target-revlog'] = target_revlog
1281 1296
1282 1297 msg = (
1283 1298 b"DBG-DELTAS:"
1284 1299 b" %-12s"
1285 1300 b" rev=%d:"
1286 b" search-rounds=%d"
1301 b" delta-base=%d"
1302 b" is-cached=%d"
1303 b" - search-rounds=%d"
1287 1304 b" try-count=%d"
1288 1305 b" - delta-type=%-6s"
1289 1306 b" snap-depth=%d"
1290 1307 b" - p1-chain-length=%d"
1291 1308 b" p2-chain-length=%d"
1292 1309 b" - duration=%f"
1293 1310 b"\n"
1294 1311 )
1295 1312 msg %= (
1296 1313 dbg["target-revlog"],
1297 1314 dbg["revision"],
1315 dbg["delta-base"],
1316 dbg["using-cached-base"],
1298 1317 dbg["search_round_count"],
1299 1318 dbg["delta_try_count"],
1300 1319 dbg["type"],
1301 1320 dbg["snapshot-depth"],
1302 1321 dbg["p1-chain-len"],
1303 1322 dbg["p2-chain-len"],
1304 1323 dbg["duration"],
1305 1324 )
1306 1325 self._write_debug(msg)
1307 1326 return deltainfo
1308 1327
1309 1328
1310 1329 def delta_compression(default_compression_header, deltainfo):
1311 1330 """return (COMPRESSION_MODE, deltainfo)
1312 1331
1313 1332 used by revlog v2+ format to dispatch between PLAIN and DEFAULT
1314 1333 compression.
1315 1334 """
1316 1335 h, d = deltainfo.data
1317 1336 compression_mode = COMP_MODE_INLINE
1318 1337 if not h and not d:
1319 1338 # not data to store at all... declare them uncompressed
1320 1339 compression_mode = COMP_MODE_PLAIN
1321 1340 elif not h:
1322 1341 t = d[0:1]
1323 1342 if t == b'\0':
1324 1343 compression_mode = COMP_MODE_PLAIN
1325 1344 elif t == default_compression_header:
1326 1345 compression_mode = COMP_MODE_DEFAULT
1327 1346 elif h == b'u':
1328 1347 # we have a more efficient way to declare uncompressed
1329 1348 h = b''
1330 1349 compression_mode = COMP_MODE_PLAIN
1331 1350 deltainfo = drop_u_compression(deltainfo)
1332 1351 return compression_mode, deltainfo
@@ -1,1065 +1,1065 b''
1 1 Setting up test
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo 0 > afile
6 6 $ hg add afile
7 7 $ hg commit -m "0.0"
8 8 $ echo 1 >> afile
9 9 $ hg commit -m "0.1"
10 10 $ echo 2 >> afile
11 11 $ hg commit -m "0.2"
12 12 $ echo 3 >> afile
13 13 $ hg commit -m "0.3"
14 14 $ hg update -C 0
15 15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 16 $ echo 1 >> afile
17 17 $ hg commit -m "1.1"
18 18 created new head
19 19 $ echo 2 >> afile
20 20 $ hg commit -m "1.2"
21 21 $ echo "a line" > fred
22 22 $ echo 3 >> afile
23 23 $ hg add fred
24 24 $ hg commit -m "1.3"
25 25 $ hg mv afile adifferentfile
26 26 $ hg commit -m "1.3m"
27 27 $ hg update -C 3
28 28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
29 29 $ hg mv afile anotherfile
30 30 $ hg commit -m "0.3m"
31 31 $ hg verify
32 32 checking changesets
33 33 checking manifests
34 34 crosschecking files in changesets and manifests
35 35 checking files
36 36 checked 9 changesets with 7 changes to 4 files
37 37 $ cd ..
38 38 $ hg init empty
39 39
40 40 Bundle and phase
41 41
42 42 $ hg -R test phase --force --secret 0
43 43 $ hg -R test bundle phase.hg empty
44 44 searching for changes
45 45 no changes found (ignored 9 secret changesets)
46 46 [1]
47 47 $ hg -R test phase --draft -r 'head()'
48 48
49 49 Bundle --all
50 50
51 51 $ hg -R test bundle --all all.hg
52 52 9 changesets found
53 53
54 54 Bundle test to full.hg
55 55
56 56 $ hg -R test bundle full.hg empty
57 57 searching for changes
58 58 9 changesets found
59 59
60 60 Unbundle full.hg in test
61 61
62 62 $ hg -R test unbundle full.hg
63 63 adding changesets
64 64 adding manifests
65 65 adding file changes
66 66 added 0 changesets with 0 changes to 4 files
67 67 (run 'hg update' to get a working copy)
68 68
69 69 Verify empty
70 70
71 71 $ hg -R empty heads
72 72 [1]
73 73 $ hg -R empty verify
74 74 checking changesets
75 75 checking manifests
76 76 crosschecking files in changesets and manifests
77 77 checking files
78 78 checked 0 changesets with 0 changes to 0 files
79 79
80 80 #if repobundlerepo
81 81
82 82 Pull full.hg into test (using --cwd)
83 83
84 84 $ hg --cwd test pull ../full.hg
85 85 pulling from ../full.hg
86 86 searching for changes
87 87 no changes found
88 88
89 89 Verify that there are no leaked temporary files after pull (issue2797)
90 90
91 91 $ ls test/.hg | grep .hg10un
92 92 [1]
93 93
94 94 Pull full.hg into empty (using --cwd)
95 95
96 96 $ hg --cwd empty pull ../full.hg
97 97 pulling from ../full.hg
98 98 requesting all changes
99 99 adding changesets
100 100 adding manifests
101 101 adding file changes
102 102 added 9 changesets with 7 changes to 4 files (+1 heads)
103 103 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
104 104 (run 'hg heads' to see heads, 'hg merge' to merge)
105 105
106 106 Rollback empty
107 107
108 108 $ hg -R empty rollback
109 109 repository tip rolled back to revision -1 (undo pull)
110 110
111 111 Pull full.hg into empty again (using --cwd)
112 112
113 113 $ hg --cwd empty pull ../full.hg
114 114 pulling from ../full.hg
115 115 requesting all changes
116 116 adding changesets
117 117 adding manifests
118 118 adding file changes
119 119 added 9 changesets with 7 changes to 4 files (+1 heads)
120 120 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
121 121 (run 'hg heads' to see heads, 'hg merge' to merge)
122 122
123 123 Pull full.hg into test (using -R)
124 124
125 125 $ hg -R test pull full.hg
126 126 pulling from full.hg
127 127 searching for changes
128 128 no changes found
129 129
130 130 Pull full.hg into empty (using -R)
131 131
132 132 $ hg -R empty pull full.hg
133 133 pulling from full.hg
134 134 searching for changes
135 135 no changes found
136 136
137 137 Rollback empty
138 138
139 139 $ hg -R empty rollback
140 140 repository tip rolled back to revision -1 (undo pull)
141 141
142 142 Pull full.hg into empty again (using -R)
143 143
144 144 $ hg -R empty pull full.hg
145 145 pulling from full.hg
146 146 requesting all changes
147 147 adding changesets
148 148 adding manifests
149 149 adding file changes
150 150 added 9 changesets with 7 changes to 4 files (+1 heads)
151 151 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
152 152 (run 'hg heads' to see heads, 'hg merge' to merge)
153 153
154 154 Log -R full.hg in fresh empty
155 155
156 156 $ rm -r empty
157 157 $ hg init empty
158 158 $ cd empty
159 159 $ hg -R bundle://../full.hg log
160 160 changeset: 8:aa35859c02ea
161 161 tag: tip
162 162 parent: 3:eebf5a27f8ca
163 163 user: test
164 164 date: Thu Jan 01 00:00:00 1970 +0000
165 165 summary: 0.3m
166 166
167 167 changeset: 7:a6a34bfa0076
168 168 user: test
169 169 date: Thu Jan 01 00:00:00 1970 +0000
170 170 summary: 1.3m
171 171
172 172 changeset: 6:7373c1169842
173 173 user: test
174 174 date: Thu Jan 01 00:00:00 1970 +0000
175 175 summary: 1.3
176 176
177 177 changeset: 5:1bb50a9436a7
178 178 user: test
179 179 date: Thu Jan 01 00:00:00 1970 +0000
180 180 summary: 1.2
181 181
182 182 changeset: 4:095197eb4973
183 183 parent: 0:f9ee2f85a263
184 184 user: test
185 185 date: Thu Jan 01 00:00:00 1970 +0000
186 186 summary: 1.1
187 187
188 188 changeset: 3:eebf5a27f8ca
189 189 user: test
190 190 date: Thu Jan 01 00:00:00 1970 +0000
191 191 summary: 0.3
192 192
193 193 changeset: 2:e38ba6f5b7e0
194 194 user: test
195 195 date: Thu Jan 01 00:00:00 1970 +0000
196 196 summary: 0.2
197 197
198 198 changeset: 1:34c2bf6b0626
199 199 user: test
200 200 date: Thu Jan 01 00:00:00 1970 +0000
201 201 summary: 0.1
202 202
203 203 changeset: 0:f9ee2f85a263
204 204 user: test
205 205 date: Thu Jan 01 00:00:00 1970 +0000
206 206 summary: 0.0
207 207
208 208 Make sure bundlerepo doesn't leak tempfiles (issue2491)
209 209
210 210 $ ls .hg
211 211 00changelog.i
212 212 cache
213 213 requires
214 214 store
215 215 wcache
216 216
217 217 Pull ../full.hg into empty (with hook)
218 218
219 219 $ cat >> .hg/hgrc <<EOF
220 220 > [hooks]
221 221 > changegroup = sh -c "printenv.py --line changegroup"
222 222 > EOF
223 223
224 224 doesn't work (yet ?)
225 225 NOTE: msys is mangling the URL below
226 226
227 227 hg -R bundle://../full.hg verify
228 228
229 229 $ hg pull bundle://../full.hg
230 230 pulling from bundle:../full.hg
231 231 requesting all changes
232 232 adding changesets
233 233 adding manifests
234 234 adding file changes
235 235 added 9 changesets with 7 changes to 4 files (+1 heads)
236 236 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
237 237 changegroup hook: HG_HOOKNAME=changegroup
238 238 HG_HOOKTYPE=changegroup
239 239 HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
240 240 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
241 241 HG_SOURCE=pull
242 242 HG_TXNID=TXN:$ID$
243 243 HG_TXNNAME=pull
244 244 bundle:../full.hg (no-msys !)
245 245 bundle;../full.hg (msys !)
246 246 HG_URL=bundle:../full.hg (no-msys !)
247 247 HG_URL=bundle;../full.hg (msys !)
248 248
249 249 (run 'hg heads' to see heads, 'hg merge' to merge)
250 250
251 251 Rollback empty
252 252
253 253 $ hg rollback
254 254 repository tip rolled back to revision -1 (undo pull)
255 255 $ cd ..
256 256
257 257 Log -R bundle:empty+full.hg
258 258
259 259 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
260 260 8 7 6 5 4 3 2 1 0
261 261
262 262 Pull full.hg into empty again (using -R; with hook)
263 263
264 264 $ hg -R empty pull full.hg
265 265 pulling from full.hg
266 266 requesting all changes
267 267 adding changesets
268 268 adding manifests
269 269 adding file changes
270 270 added 9 changesets with 7 changes to 4 files (+1 heads)
271 271 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
272 272 changegroup hook: HG_HOOKNAME=changegroup
273 273 HG_HOOKTYPE=changegroup
274 274 HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
275 275 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
276 276 HG_SOURCE=pull
277 277 HG_TXNID=TXN:$ID$
278 278 HG_TXNNAME=pull
279 279 bundle:empty+full.hg
280 280 HG_URL=bundle:empty+full.hg
281 281
282 282 (run 'hg heads' to see heads, 'hg merge' to merge)
283 283
284 284 #endif
285 285
286 286 Cannot produce streaming clone bundles with "hg bundle"
287 287
288 288 $ hg -R test bundle -t packed1 packed.hg
289 289 abort: packed bundles cannot be produced by "hg bundle"
290 290 (use 'hg debugcreatestreamclonebundle')
291 291 [10]
292 292
293 293 packed1 is produced properly
294 294
295 295
296 296 #if reporevlogstore rust
297 297
298 298 $ hg -R test debugcreatestreamclonebundle packed.hg
299 299 writing 2665 bytes for 6 files
300 300 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
301 301
302 302 $ f -B 64 --size --sha1 --hexdump packed.hg
303 303 packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
304 304 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
305 305 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
306 306 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
307 307 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
308 308 $ hg debugbundle --spec packed.hg
309 309 none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
310 310 #endif
311 311
312 312 #if reporevlogstore no-rust zstd
313 313
314 314 $ hg -R test debugcreatestreamclonebundle packed.hg
315 315 writing 2665 bytes for 6 files
316 316 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
317 317
318 318 $ f -B 64 --size --sha1 --hexdump packed.hg
319 319 packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
320 320 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
321 321 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
322 322 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
323 323 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
324 324 $ hg debugbundle --spec packed.hg
325 325 none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
326 326 #endif
327 327
328 328 #if reporevlogstore no-rust no-zstd
329 329
330 330 $ hg -R test debugcreatestreamclonebundle packed.hg
331 331 writing 2664 bytes for 6 files
332 332 bundle requirements: generaldelta, revlogv1, sparserevlog
333 333
334 334 $ f -B 64 --size --sha1 --hexdump packed.hg
335 335 packed.hg: size=2840, sha1=12bf3eee3eb8a04c503ce2d29b48f0135c7edff5
336 336 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
337 337 0010: 00 00 00 00 0a 68 00 23 67 65 6e 65 72 61 6c 64 |.....h.#generald|
338 338 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp|
339 339 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/|
340 340 $ hg debugbundle --spec packed.hg
341 341 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog
342 342 #endif
343 343
344 344 #if reporevlogstore
345 345
346 346 generaldelta requirement is not listed in stream clone bundles unless used
347 347
348 348 $ hg --config format.usegeneraldelta=false init testnongd
349 349 $ cd testnongd
350 350 $ touch foo
351 351 $ hg -q commit -A -m initial
352 352 $ cd ..
353 353
354 354 #endif
355 355
356 356 #if reporevlogstore rust
357 357
358 358 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
359 359 writing 301 bytes for 3 files
360 360 bundle requirements: revlog-compression-zstd, revlogv1
361 361
362 362 $ f -B 64 --size --sha1 --hexdump packednongd.hg
363 363 packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
364 364 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
365 365 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
366 366 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
367 367 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
368 368
369 369 $ hg debugbundle --spec packednongd.hg
370 370 none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
371 371
372 372 #endif
373 373
374 374 #if reporevlogstore no-rust zstd
375 375
376 376 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
377 377 writing 301 bytes for 3 files
378 378 bundle requirements: revlog-compression-zstd, revlogv1
379 379
380 380 $ f -B 64 --size --sha1 --hexdump packednongd.hg
381 381 packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
382 382 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
383 383 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
384 384 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
385 385 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
386 386
387 387 $ hg debugbundle --spec packednongd.hg
388 388 none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
389 389
390 390
391 391 #endif
392 392
393 393 #if reporevlogstore no-rust no-zstd
394 394
395 395 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
396 396 writing 301 bytes for 3 files
397 397 bundle requirements: revlogv1
398 398
399 399 $ f -B 64 --size --sha1 --hexdump packednongd.hg
400 400 packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f
401 401 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
402 402 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1|
403 403 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..|
404 404 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
405 405
406 406 $ hg debugbundle --spec packednongd.hg
407 407 none-packed1;requirements%3Drevlogv1
408 408
409 409
410 410 #endif
411 411
412 412 #if reporevlogstore
413 413
414 414 Warning emitted when packed bundles contain secret changesets
415 415
416 416 $ hg init testsecret
417 417 $ cd testsecret
418 418 $ touch foo
419 419 $ hg -q commit -A -m initial
420 420 $ hg phase --force --secret -r .
421 421 $ cd ..
422 422
423 423 #endif
424 424
425 425 #if reporevlogstore rust
426 426
427 427 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
428 428 (warning: stream clone bundle will contain secret revisions)
429 429 writing 301 bytes for 3 files
430 430 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
431 431
432 432 #endif
433 433
434 434 #if reporevlogstore no-rust zstd
435 435
436 436 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
437 437 (warning: stream clone bundle will contain secret revisions)
438 438 writing 301 bytes for 3 files
439 439 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
440 440
441 441 #endif
442 442
443 443 #if reporevlogstore no-rust no-zstd
444 444
445 445 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
446 446 (warning: stream clone bundle will contain secret revisions)
447 447 writing 301 bytes for 3 files
448 448 bundle requirements: generaldelta, revlogv1, sparserevlog
449 449
450 450 #endif
451 451
452 452 #if reporevlogstore
453 453
454 454 Unpacking packed1 bundles with "hg unbundle" isn't allowed
455 455
456 456 $ hg init packed
457 457 $ hg -R packed unbundle packed.hg
458 458 abort: packed bundles cannot be applied with "hg unbundle"
459 459 (use "hg debugapplystreamclonebundle")
460 460 [10]
461 461
462 462 packed1 can be consumed from debug command
463 463
464 464 (this also confirms that streamclone-ed changes are visible via
465 465 @filecache properties to in-process procedures before closing
466 466 transaction)
467 467
468 468 $ cat > $TESTTMP/showtip.py <<EOF
469 469 >
470 470 > def showtip(ui, repo, hooktype, **kwargs):
471 471 > ui.warn(b'%s: %s\n' % (hooktype, repo[b'tip'].hex()[:12]))
472 472 >
473 473 > def reposetup(ui, repo):
474 474 > # this confirms (and ensures) that (empty) 00changelog.i
475 475 > # before streamclone is already cached as repo.changelog
476 476 > ui.setconfig(b'hooks', b'pretxnopen.showtip', showtip)
477 477 >
478 478 > # this confirms that streamclone-ed changes are visible to
479 479 > # in-process procedures before closing transaction
480 480 > ui.setconfig(b'hooks', b'pretxnclose.showtip', showtip)
481 481 >
482 482 > # this confirms that streamclone-ed changes are still visible
483 483 > # after closing transaction
484 484 > ui.setconfig(b'hooks', b'txnclose.showtip', showtip)
485 485 > EOF
486 486 $ cat >> $HGRCPATH <<EOF
487 487 > [extensions]
488 488 > showtip = $TESTTMP/showtip.py
489 489 > EOF
490 490
491 491 $ hg -R packed debugapplystreamclonebundle packed.hg
492 492 6 files to transfer, 2.60 KB of data
493 493 pretxnopen: 000000000000
494 494 pretxnclose: aa35859c02ea
495 495 transferred 2.60 KB in * seconds (* */sec) (glob)
496 496 txnclose: aa35859c02ea
497 497
498 498 (for safety, confirm visibility of streamclone-ed changes by another
499 499 process, too)
500 500
501 501 $ hg -R packed tip -T "{node|short}\n"
502 502 aa35859c02ea
503 503
504 504 $ cat >> $HGRCPATH <<EOF
505 505 > [extensions]
506 506 > showtip = !
507 507 > EOF
508 508
509 509 Does not work on non-empty repo
510 510
511 511 $ hg -R packed debugapplystreamclonebundle packed.hg
512 512 abort: cannot apply stream clone bundle on non-empty repo
513 513 [255]
514 514
515 515 #endif
516 516
517 517 Create partial clones
518 518
519 519 $ rm -r empty
520 520 $ hg init empty
521 521 $ hg clone -r 3 test partial
522 522 adding changesets
523 523 adding manifests
524 524 adding file changes
525 525 added 4 changesets with 4 changes to 1 files
526 526 new changesets f9ee2f85a263:eebf5a27f8ca
527 527 updating to branch default
528 528 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
529 529 $ hg clone partial partial2
530 530 updating to branch default
531 531 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
532 532 $ cd partial
533 533
534 534 #if repobundlerepo
535 535
536 536 Log -R full.hg in partial
537 537
538 538 $ hg -R bundle://../full.hg log -T phases
539 539 changeset: 8:aa35859c02ea
540 540 tag: tip
541 541 phase: draft
542 542 parent: 3:eebf5a27f8ca
543 543 user: test
544 544 date: Thu Jan 01 00:00:00 1970 +0000
545 545 summary: 0.3m
546 546
547 547 changeset: 7:a6a34bfa0076
548 548 phase: draft
549 549 user: test
550 550 date: Thu Jan 01 00:00:00 1970 +0000
551 551 summary: 1.3m
552 552
553 553 changeset: 6:7373c1169842
554 554 phase: draft
555 555 user: test
556 556 date: Thu Jan 01 00:00:00 1970 +0000
557 557 summary: 1.3
558 558
559 559 changeset: 5:1bb50a9436a7
560 560 phase: draft
561 561 user: test
562 562 date: Thu Jan 01 00:00:00 1970 +0000
563 563 summary: 1.2
564 564
565 565 changeset: 4:095197eb4973
566 566 phase: draft
567 567 parent: 0:f9ee2f85a263
568 568 user: test
569 569 date: Thu Jan 01 00:00:00 1970 +0000
570 570 summary: 1.1
571 571
572 572 changeset: 3:eebf5a27f8ca
573 573 phase: public
574 574 user: test
575 575 date: Thu Jan 01 00:00:00 1970 +0000
576 576 summary: 0.3
577 577
578 578 changeset: 2:e38ba6f5b7e0
579 579 phase: public
580 580 user: test
581 581 date: Thu Jan 01 00:00:00 1970 +0000
582 582 summary: 0.2
583 583
584 584 changeset: 1:34c2bf6b0626
585 585 phase: public
586 586 user: test
587 587 date: Thu Jan 01 00:00:00 1970 +0000
588 588 summary: 0.1
589 589
590 590 changeset: 0:f9ee2f85a263
591 591 phase: public
592 592 user: test
593 593 date: Thu Jan 01 00:00:00 1970 +0000
594 594 summary: 0.0
595 595
596 596
597 597 Incoming full.hg in partial
598 598
599 599 $ hg incoming bundle://../full.hg
600 600 comparing with bundle:../full.hg
601 601 searching for changes
602 602 changeset: 4:095197eb4973
603 603 parent: 0:f9ee2f85a263
604 604 user: test
605 605 date: Thu Jan 01 00:00:00 1970 +0000
606 606 summary: 1.1
607 607
608 608 changeset: 5:1bb50a9436a7
609 609 user: test
610 610 date: Thu Jan 01 00:00:00 1970 +0000
611 611 summary: 1.2
612 612
613 613 changeset: 6:7373c1169842
614 614 user: test
615 615 date: Thu Jan 01 00:00:00 1970 +0000
616 616 summary: 1.3
617 617
618 618 changeset: 7:a6a34bfa0076
619 619 user: test
620 620 date: Thu Jan 01 00:00:00 1970 +0000
621 621 summary: 1.3m
622 622
623 623 changeset: 8:aa35859c02ea
624 624 tag: tip
625 625 parent: 3:eebf5a27f8ca
626 626 user: test
627 627 date: Thu Jan 01 00:00:00 1970 +0000
628 628 summary: 0.3m
629 629
630 630
631 631 Outgoing -R full.hg vs partial2 in partial
632 632
633 633 $ hg -R bundle://../full.hg outgoing ../partial2
634 634 comparing with ../partial2
635 635 searching for changes
636 636 changeset: 4:095197eb4973
637 637 parent: 0:f9ee2f85a263
638 638 user: test
639 639 date: Thu Jan 01 00:00:00 1970 +0000
640 640 summary: 1.1
641 641
642 642 changeset: 5:1bb50a9436a7
643 643 user: test
644 644 date: Thu Jan 01 00:00:00 1970 +0000
645 645 summary: 1.2
646 646
647 647 changeset: 6:7373c1169842
648 648 user: test
649 649 date: Thu Jan 01 00:00:00 1970 +0000
650 650 summary: 1.3
651 651
652 652 changeset: 7:a6a34bfa0076
653 653 user: test
654 654 date: Thu Jan 01 00:00:00 1970 +0000
655 655 summary: 1.3m
656 656
657 657 changeset: 8:aa35859c02ea
658 658 tag: tip
659 659 parent: 3:eebf5a27f8ca
660 660 user: test
661 661 date: Thu Jan 01 00:00:00 1970 +0000
662 662 summary: 0.3m
663 663
664 664
665 665 Outgoing -R does-not-exist.hg vs partial2 in partial
666 666
667 667 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
668 668 abort: *../does-not-exist.hg* (glob)
669 669 [255]
670 670
671 671 #endif
672 672
673 673 $ cd ..
674 674
675 675 hide outer repo
676 676 $ hg init
677 677
678 678 Direct clone from bundle (all-history)
679 679
680 680 #if repobundlerepo
681 681
682 682 $ hg clone full.hg full-clone
683 683 requesting all changes
684 684 adding changesets
685 685 adding manifests
686 686 adding file changes
687 687 added 9 changesets with 7 changes to 4 files (+1 heads)
688 688 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
689 689 updating to branch default
690 690 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
691 691 $ hg -R full-clone heads
692 692 changeset: 8:aa35859c02ea
693 693 tag: tip
694 694 parent: 3:eebf5a27f8ca
695 695 user: test
696 696 date: Thu Jan 01 00:00:00 1970 +0000
697 697 summary: 0.3m
698 698
699 699 changeset: 7:a6a34bfa0076
700 700 user: test
701 701 date: Thu Jan 01 00:00:00 1970 +0000
702 702 summary: 1.3m
703 703
704 704 $ rm -r full-clone
705 705
706 706 When cloning from a non-copiable repository into '', do not
707 707 recurse infinitely (issue2528)
708 708
709 709 $ hg clone full.hg ''
710 710 abort: empty destination path is not valid
711 711 [10]
712 712
713 713 test for https://bz.mercurial-scm.org/216
714 714
715 715 Unbundle incremental bundles into fresh empty in one go
716 716
717 717 $ rm -r empty
718 718 $ hg init empty
719 719 $ hg -R test bundle --base null -r 0 ../0.hg
720 720 1 changesets found
721 721 $ hg -R test bundle --exact -r 1 ../1.hg
722 722 1 changesets found
723 723 $ hg -R empty unbundle -u ../0.hg ../1.hg
724 724 adding changesets
725 725 adding manifests
726 726 adding file changes
727 727 added 1 changesets with 1 changes to 1 files
728 728 new changesets f9ee2f85a263 (1 drafts)
729 729 adding changesets
730 730 adding manifests
731 731 adding file changes
732 732 added 1 changesets with 1 changes to 1 files
733 733 new changesets 34c2bf6b0626 (1 drafts)
734 734 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
735 735
736 736 View full contents of the bundle
737 737 $ hg -R test bundle --base null -r 3 ../partial.hg
738 738 4 changesets found
739 739 $ cd test
740 740 $ hg -R ../../partial.hg log -r "bundle()"
741 741 changeset: 0:f9ee2f85a263
742 742 user: test
743 743 date: Thu Jan 01 00:00:00 1970 +0000
744 744 summary: 0.0
745 745
746 746 changeset: 1:34c2bf6b0626
747 747 user: test
748 748 date: Thu Jan 01 00:00:00 1970 +0000
749 749 summary: 0.1
750 750
751 751 changeset: 2:e38ba6f5b7e0
752 752 user: test
753 753 date: Thu Jan 01 00:00:00 1970 +0000
754 754 summary: 0.2
755 755
756 756 changeset: 3:eebf5a27f8ca
757 757 user: test
758 758 date: Thu Jan 01 00:00:00 1970 +0000
759 759 summary: 0.3
760 760
761 761 $ cd ..
762 762
763 763 #endif
764 764
765 765 test for 540d1059c802
766 766
767 767 $ hg init orig
768 768 $ cd orig
769 769 $ echo foo > foo
770 770 $ hg add foo
771 771 $ hg ci -m 'add foo'
772 772
773 773 $ hg clone . ../copy
774 774 updating to branch default
775 775 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
776 776 $ hg tag foo
777 777
778 778 $ cd ../copy
779 779 $ echo >> foo
780 780 $ hg ci -m 'change foo'
781 781 $ hg bundle ../bundle.hg ../orig
782 782 searching for changes
783 783 1 changesets found
784 784
785 785 $ cd ..
786 786
787 787 #if repobundlerepo
788 788 $ cd orig
789 789 $ hg incoming ../bundle.hg
790 790 comparing with ../bundle.hg
791 791 searching for changes
792 792 changeset: 2:ed1b79f46b9a
793 793 tag: tip
794 794 parent: 0:bbd179dfa0a7
795 795 user: test
796 796 date: Thu Jan 01 00:00:00 1970 +0000
797 797 summary: change foo
798 798
799 799 $ cd ..
800 800
801 801 test bundle with # in the filename (issue2154):
802 802
803 803 $ cp bundle.hg 'test#bundle.hg'
804 804 $ cd orig
805 805 $ hg incoming '../test#bundle.hg'
806 806 comparing with ../test
807 807 abort: unknown revision 'bundle.hg'
808 808 [10]
809 809
810 810 note that percent encoding is not handled:
811 811
812 812 $ hg incoming ../test%23bundle.hg
813 813 abort: repository ../test%23bundle.hg not found
814 814 [255]
815 815 $ cd ..
816 816
817 817 #endif
818 818
819 819 test to bundle revisions on the newly created branch (issue3828):
820 820
821 821 $ hg -q clone -U test test-clone
822 822 $ cd test
823 823
824 824 $ hg -q branch foo
825 825 $ hg commit -m "create foo branch"
826 826 $ hg -q outgoing ../test-clone
827 827 9:b4f5acb1ee27
828 828 $ hg -q bundle --branch foo foo.hg ../test-clone
829 829 #if repobundlerepo
830 830 $ hg -R foo.hg -q log -r "bundle()"
831 831 9:b4f5acb1ee27
832 832 #endif
833 833
834 834 $ cd ..
835 835
836 836 test for https://bz.mercurial-scm.org/1144
837 837
838 838 test that verify bundle does not traceback
839 839
840 840 partial history bundle, fails w/ unknown parent
841 841
842 842 $ hg -R bundle.hg verify
843 843 abort: 00changelog@bbd179dfa0a71671c253b3ae0aa1513b60d199fa: unknown parent
844 844 [50]
845 845
846 846 full history bundle, refuses to verify non-local repo
847 847
848 848 #if repobundlerepo
849 849 $ hg -R all.hg verify
850 850 abort: cannot verify bundle or remote repos
851 851 [255]
852 852 #endif
853 853
854 854 but, regular verify must continue to work
855 855
856 856 $ hg -R orig verify
857 857 checking changesets
858 858 checking manifests
859 859 crosschecking files in changesets and manifests
860 860 checking files
861 861 checked 2 changesets with 2 changes to 2 files
862 862
863 863 #if repobundlerepo
864 864 diff against bundle
865 865
866 866 $ hg init b
867 867 $ cd b
868 868 $ hg -R ../all.hg diff -r tip
869 869 diff -r aa35859c02ea anotherfile
870 870 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
871 871 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
872 872 @@ -1,4 +0,0 @@
873 873 -0
874 874 -1
875 875 -2
876 876 -3
877 877 $ cd ..
878 878 #endif
879 879
880 880 bundle single branch
881 881
882 882 $ hg init branchy
883 883 $ cd branchy
884 884 $ echo a >a
885 885 $ echo x >x
886 886 $ hg ci -Ama
887 887 adding a
888 888 adding x
889 889 $ echo c >c
890 890 $ echo xx >x
891 891 $ hg ci -Amc
892 892 adding c
893 893 $ echo c1 >c1
894 894 $ hg ci -Amc1
895 895 adding c1
896 896 $ hg up 0
897 897 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
898 898 $ echo b >b
899 899 $ hg ci -Amb
900 900 adding b
901 901 created new head
902 902 $ echo b1 >b1
903 903 $ echo xx >x
904 904 $ hg ci -Amb1
905 905 adding b1
906 906 $ hg clone -q -r2 . part
907 907
908 908 == bundling via incoming
909 909
910 910 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
911 911 comparing with .
912 912 searching for changes
913 913 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
914 914 057f4db07f61970e1c11e83be79e9d08adc4dc31
915 915
916 916 == bundling
917 917
918 918 $ hg bundle bundle.hg part --debug --config progress.debug=true
919 919 query 1; heads
920 920 searching for changes
921 921 all remote heads known locally
922 922 2 changesets found
923 923 list of changesets:
924 924 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
925 925 057f4db07f61970e1c11e83be79e9d08adc4dc31
926 926 bundle2-output-bundle: "HG20", (1 params) 2 parts total
927 927 bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
928 928 changesets: 1/2 chunks (50.00%)
929 929 changesets: 2/2 chunks (100.00%)
930 930 manifests: 1/2 chunks (50.00%)
931 931 manifests: 2/2 chunks (100.00%)
932 932 files: b 1/3 files (33.33%)
933 933 files: b1 2/3 files (66.67%)
934 934 files: x 3/3 files (100.00%)
935 935 bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
936 936
937 937 #if repobundlerepo
938 938 == Test for issue3441
939 939
940 940 $ hg clone -q -r0 . part2
941 941 $ hg -q -R part2 pull bundle.hg
942 942 $ hg -R part2 verify
943 943 checking changesets
944 944 checking manifests
945 945 crosschecking files in changesets and manifests
946 946 checking files
947 947 checked 3 changesets with 5 changes to 4 files
948 948 #endif
949 949
950 950 == Test bundling no commits
951 951
952 952 $ hg bundle -r 'public()' no-output.hg
953 953 abort: no commits to bundle
954 954 [10]
955 955
956 956 $ cd ..
957 957
958 958 When user merges to the revision existing only in the bundle,
959 959 it should show warning that second parent of the working
960 960 directory does not exist
961 961
962 962 $ hg init update2bundled
963 963 $ cd update2bundled
964 964 $ cat <<EOF >> .hg/hgrc
965 965 > [extensions]
966 966 > strip =
967 967 > EOF
968 968 $ echo "aaa" >> a
969 969 $ hg commit -A -m 0
970 970 adding a
971 971 $ echo "bbb" >> b
972 972 $ hg commit -A -m 1
973 973 adding b
974 974 $ echo "ccc" >> c
975 975 $ hg commit -A -m 2
976 976 adding c
977 977 $ hg update -r 1
978 978 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
979 979 $ echo "ddd" >> d
980 980 $ hg commit -A -m 3
981 981 adding d
982 982 created new head
983 983 $ hg update -r 2
984 984 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
985 985 $ hg log -G
986 986 o changeset: 3:8bd3e1f196af
987 987 | tag: tip
988 988 | parent: 1:a01eca7af26d
989 989 | user: test
990 990 | date: Thu Jan 01 00:00:00 1970 +0000
991 991 | summary: 3
992 992 |
993 993 | @ changeset: 2:4652c276ac4f
994 994 |/ user: test
995 995 | date: Thu Jan 01 00:00:00 1970 +0000
996 996 | summary: 2
997 997 |
998 998 o changeset: 1:a01eca7af26d
999 999 | user: test
1000 1000 | date: Thu Jan 01 00:00:00 1970 +0000
1001 1001 | summary: 1
1002 1002 |
1003 1003 o changeset: 0:4fe08cd4693e
1004 1004 user: test
1005 1005 date: Thu Jan 01 00:00:00 1970 +0000
1006 1006 summary: 0
1007 1007
1008 1008
1009 1009 #if repobundlerepo
1010 1010 $ hg bundle --base 1 -r 3 ../update2bundled.hg
1011 1011 1 changesets found
1012 1012 $ hg strip -r 3
1013 1013 saved backup bundle to $TESTTMP/update2bundled/.hg/strip-backup/8bd3e1f196af-017e56d8-backup.hg
1014 1014 $ hg merge -R ../update2bundled.hg -r 3
1015 1015 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
1016 1016 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1017 1017 (branch merge, don't forget to commit)
1018 1018
1019 1019 When user updates to the revision existing only in the bundle,
1020 1020 it should show warning
1021 1021
1022 1022 $ hg update -R ../update2bundled.hg --clean -r 3
1023 1023 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
1024 1024 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
1025 1025
1026 1026 When user updates to the revision existing in the local repository
1027 1027 the warning shouldn't be emitted
1028 1028
1029 1029 $ hg update -R ../update2bundled.hg -r 0
1030 1030 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1031 1031 #endif
1032 1032
1033 1033 Test the option that create slim bundle
1034 1034
1035 1035 $ hg bundle -a --config devel.bundle.delta=p1 ./slim.hg
1036 1036 3 changesets found
1037 1037
1038 1038 Test the option that create and no-delta's bundle
1039 1039 $ hg bundle -a --config devel.bundle.delta=full ./full.hg
1040 1040 3 changesets found
1041 1041
1042 1042 Test the debug output when applying delta
1043 1043 -----------------------------------------
1044 1044
1045 1045 $ hg init foo
1046 1046 $ hg -R foo unbundle ./slim.hg \
1047 1047 > --config debug.revlog.debug-delta=yes \
1048 1048 > --config storage.revlog.reuse-external-delta=no \
1049 1049 > --config storage.revlog.reuse-external-delta-parent=no
1050 1050 adding changesets
1051 DBG-DELTAS: CHANGELOG: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1052 DBG-DELTAS: CHANGELOG: rev=1: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1053 DBG-DELTAS: CHANGELOG: rev=2: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1051 DBG-DELTAS: CHANGELOG: rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1052 DBG-DELTAS: CHANGELOG: rev=1: delta-base=1 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1053 DBG-DELTAS: CHANGELOG: rev=2: delta-base=2 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1054 1054 adding manifests
1055 DBG-DELTAS: MANIFESTLOG: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1056 DBG-DELTAS: MANIFESTLOG: rev=1: search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1057 DBG-DELTAS: MANIFESTLOG: rev=2: search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
1055 DBG-DELTAS: MANIFESTLOG: rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1056 DBG-DELTAS: MANIFESTLOG: rev=1: delta-base=0 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1057 DBG-DELTAS: MANIFESTLOG: rev=2: delta-base=1 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
1058 1058 adding file changes
1059 DBG-DELTAS: FILELOG:a: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1060 DBG-DELTAS: FILELOG:b: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1061 DBG-DELTAS: FILELOG:c: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1059 DBG-DELTAS: FILELOG:a: rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1060 DBG-DELTAS: FILELOG:b: rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1061 DBG-DELTAS: FILELOG:c: rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1062 1062 added 3 changesets with 3 changes to 3 files
1063 1063 new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
1064 1064 (run 'hg update' to get a working copy)
1065 1065
@@ -1,346 +1,346 b''
1 1 ====================================
2 2 Test delta choice with sparse revlog
3 3 ====================================
4 4
5 5 Sparse-revlog usually shows the most gain on Manifest. However, it is simpler
6 6 to general an appropriate file, so we test with a single file instead. The
7 7 goal is to observe intermediate snapshot being created.
8 8
9 9 We need a large enough file. Part of the content needs to be replaced
10 10 repeatedly while some of it changes rarely.
11 11
12 12 $ bundlepath="$TESTDIR/artifacts/cache/big-file-churn.hg"
13 13
14 14 $ expectedhash=`cat "$bundlepath".md5`
15 15
16 16 #if slow
17 17
18 18 $ if [ ! -f "$bundlepath" ]; then
19 19 > "$TESTDIR"/artifacts/scripts/generate-churning-bundle.py > /dev/null
20 20 > fi
21 21
22 22 #else
23 23
24 24 $ if [ ! -f "$bundlepath" ]; then
25 25 > echo 'skipped: missing artifact, run "'"$TESTDIR"'/artifacts/scripts/generate-churning-bundle.py"'
26 26 > exit 80
27 27 > fi
28 28
29 29 #endif
30 30
31 31 $ currenthash=`f -M "$bundlepath" | cut -d = -f 2`
32 32 $ if [ "$currenthash" != "$expectedhash" ]; then
33 33 > echo 'skipped: outdated artifact, md5 "'"$currenthash"'" expected "'"$expectedhash"'" run "'"$TESTDIR"'/artifacts/scripts/generate-churning-bundle.py"'
34 34 > exit 80
35 35 > fi
36 36
37 37 $ cat >> $HGRCPATH << EOF
38 38 > [format]
39 39 > sparse-revlog = yes
40 40 > maxchainlen = 15
41 41 > [storage]
42 42 > revlog.optimize-delta-parent-choice = yes
43 43 > revlog.reuse-external-delta = no
44 44 > EOF
45 45 $ hg init sparse-repo
46 46 $ cd sparse-repo
47 47 $ hg unbundle $bundlepath
48 48 adding changesets
49 49 adding manifests
50 50 adding file changes
51 51 added 5001 changesets with 5001 changes to 1 files (+89 heads)
52 52 new changesets 9706f5af64f4:d9032adc8114 (5001 drafts)
53 53 (run 'hg heads' to see heads, 'hg merge' to merge)
54 54 $ hg up
55 55 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
56 56 updated to "d9032adc8114: commit #5000"
57 57 89 other heads for branch "default"
58 58
59 59 $ hg log --stat -r 0:3
60 60 changeset: 0:9706f5af64f4
61 61 user: test
62 62 date: Thu Jan 01 00:00:00 1970 +0000
63 63 summary: initial commit
64 64
65 65 SPARSE-REVLOG-TEST-FILE | 10500 ++++++++++++++++++++++++++++++++++++++++++++++
66 66 1 files changed, 10500 insertions(+), 0 deletions(-)
67 67
68 68 changeset: 1:724907deaa5e
69 69 user: test
70 70 date: Thu Jan 01 00:00:00 1970 +0000
71 71 summary: commit #1
72 72
73 73 SPARSE-REVLOG-TEST-FILE | 1068 +++++++++++++++++++++++-----------------------
74 74 1 files changed, 534 insertions(+), 534 deletions(-)
75 75
76 76 changeset: 2:62c41bce3e5d
77 77 user: test
78 78 date: Thu Jan 01 00:00:00 1970 +0000
79 79 summary: commit #2
80 80
81 81 SPARSE-REVLOG-TEST-FILE | 1068 +++++++++++++++++++++++-----------------------
82 82 1 files changed, 534 insertions(+), 534 deletions(-)
83 83
84 84 changeset: 3:348a9cbd6959
85 85 user: test
86 86 date: Thu Jan 01 00:00:00 1970 +0000
87 87 summary: commit #3
88 88
89 89 SPARSE-REVLOG-TEST-FILE | 1068 +++++++++++++++++++++++-----------------------
90 90 1 files changed, 534 insertions(+), 534 deletions(-)
91 91
92 92
93 93 $ f -s .hg/store/data/*.d
94 94 .hg/store/data/_s_p_a_r_s_e-_r_e_v_l_o_g-_t_e_s_t-_f_i_l_e.d: size=58616973
95 95 $ hg debugrevlog *
96 96 format : 1
97 97 flags : generaldelta
98 98
99 99 revisions : 5001
100 100 merges : 625 (12.50%)
101 101 normal : 4376 (87.50%)
102 102 revisions : 5001
103 103 empty : 0 ( 0.00%)
104 104 text : 0 (100.00%)
105 105 delta : 0 (100.00%)
106 106 snapshot : 383 ( 7.66%)
107 107 lvl-0 : 3 ( 0.06%)
108 108 lvl-1 : 18 ( 0.36%)
109 109 lvl-2 : 62 ( 1.24%)
110 110 lvl-3 : 108 ( 2.16%)
111 111 lvl-4 : 191 ( 3.82%)
112 112 lvl-5 : 1 ( 0.02%)
113 113 deltas : 4618 (92.34%)
114 114 revision size : 58616973
115 115 snapshot : 9247844 (15.78%)
116 116 lvl-0 : 539532 ( 0.92%)
117 117 lvl-1 : 1467743 ( 2.50%)
118 118 lvl-2 : 1873820 ( 3.20%)
119 119 lvl-3 : 2326874 ( 3.97%)
120 120 lvl-4 : 3029118 ( 5.17%)
121 121 lvl-5 : 10757 ( 0.02%)
122 122 deltas : 49369129 (84.22%)
123 123
124 124 chunks : 5001
125 125 0x28 : 5001 (100.00%)
126 126 chunks size : 58616973
127 127 0x28 : 58616973 (100.00%)
128 128
129 129 avg chain length : 9
130 130 max chain length : 15
131 131 max chain reach : 27366701
132 132 compression ratio : 29
133 133
134 134 uncompressed data size (min/max/avg) : 346468 / 346472 / 346471
135 135 full revision size (min/max/avg) : 179288 / 180786 / 179844
136 136 inter-snapshot size (min/max/avg) : 10757 / 169507 / 22916
137 137 level-1 (min/max/avg) : 13905 / 169507 / 81541
138 138 level-2 (min/max/avg) : 10887 / 83873 / 30222
139 139 level-3 (min/max/avg) : 10911 / 43047 / 21545
140 140 level-4 (min/max/avg) : 10838 / 21390 / 15859
141 141 level-5 (min/max/avg) : 10757 / 10757 / 10757
142 142 delta size (min/max/avg) : 9672 / 108072 / 10690
143 143
144 144 deltas against prev : 3906 (84.58%)
145 145 where prev = p1 : 3906 (100.00%)
146 146 where prev = p2 : 0 ( 0.00%)
147 147 other : 0 ( 0.00%)
148 148 deltas against p1 : 649 (14.05%)
149 149 deltas against p2 : 63 ( 1.36%)
150 150 deltas against other : 0 ( 0.00%)
151 151
152 152
153 153 Test `debug-delta-find`
154 154 -----------------------
155 155
156 156 $ ls -1
157 157 SPARSE-REVLOG-TEST-FILE
158 158 $ hg debugdeltachain SPARSE-REVLOG-TEST-FILE | grep snap | tail -1
159 159 4971 4970 -1 3 5 4930 snap 19179 346472 427596 1.23414 15994877 15567281 36.40652 427596 179288 1.00000 5
160 160 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971
161 161 DBG-DELTAS-SEARCH: SEARCH rev=4971
162 162 DBG-DELTAS-SEARCH: ROUND #1 - 2 candidates - search-down
163 163 DBG-DELTAS-SEARCH: CANDIDATE: rev=4962
164 164 DBG-DELTAS-SEARCH: type=snapshot-4
165 165 DBG-DELTAS-SEARCH: size=18296
166 166 DBG-DELTAS-SEARCH: base=4930
167 167 DBG-DELTAS-SEARCH: uncompressed-delta-size=30377
168 168 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
169 169 DBG-DELTAS-SEARCH: DELTA: length=16872 (BAD)
170 170 DBG-DELTAS-SEARCH: CANDIDATE: rev=4971
171 171 DBG-DELTAS-SEARCH: type=snapshot-4
172 172 DBG-DELTAS-SEARCH: size=19179
173 173 DBG-DELTAS-SEARCH: base=4930
174 174 DBG-DELTAS-SEARCH: TOO-HIGH
175 175 DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
176 176 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
177 177 DBG-DELTAS-SEARCH: type=snapshot-3
178 178 DBG-DELTAS-SEARCH: size=39228
179 179 DBG-DELTAS-SEARCH: base=4799
180 180 DBG-DELTAS-SEARCH: uncompressed-delta-size=33050
181 181 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
182 182 DBG-DELTAS-SEARCH: DELTA: length=19179 (GOOD)
183 183 DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
184 184 DBG-DELTAS-SEARCH: CONTENDER: rev=4930 - length=19179
185 185 DBG-DELTAS-SEARCH: CANDIDATE: rev=4799
186 186 DBG-DELTAS-SEARCH: type=snapshot-2
187 187 DBG-DELTAS-SEARCH: size=50213
188 188 DBG-DELTAS-SEARCH: base=4623
189 189 DBG-DELTAS-SEARCH: uncompressed-delta-size=82661
190 190 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
191 191 DBG-DELTAS-SEARCH: DELTA: length=49132 (BAD)
192 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
192 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
193 193
194 194 $ cat << EOF >>.hg/hgrc
195 195 > [storage]
196 196 > revlog.optimize-delta-parent-choice = no
197 197 > revlog.reuse-external-delta = yes
198 198 > EOF
199 199
200 200 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --quiet
201 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
201 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
202 202 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source full
203 203 DBG-DELTAS-SEARCH: SEARCH rev=4971
204 204 DBG-DELTAS-SEARCH: ROUND #1 - 2 candidates - search-down
205 205 DBG-DELTAS-SEARCH: CANDIDATE: rev=4962
206 206 DBG-DELTAS-SEARCH: type=snapshot-4
207 207 DBG-DELTAS-SEARCH: size=18296
208 208 DBG-DELTAS-SEARCH: base=4930
209 209 DBG-DELTAS-SEARCH: uncompressed-delta-size=30377
210 210 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
211 211 DBG-DELTAS-SEARCH: DELTA: length=16872 (BAD)
212 212 DBG-DELTAS-SEARCH: CANDIDATE: rev=4971
213 213 DBG-DELTAS-SEARCH: type=snapshot-4
214 214 DBG-DELTAS-SEARCH: size=19179
215 215 DBG-DELTAS-SEARCH: base=4930
216 216 DBG-DELTAS-SEARCH: TOO-HIGH
217 217 DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
218 218 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
219 219 DBG-DELTAS-SEARCH: type=snapshot-3
220 220 DBG-DELTAS-SEARCH: size=39228
221 221 DBG-DELTAS-SEARCH: base=4799
222 222 DBG-DELTAS-SEARCH: uncompressed-delta-size=33050
223 223 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
224 224 DBG-DELTAS-SEARCH: DELTA: length=19179 (GOOD)
225 225 DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
226 226 DBG-DELTAS-SEARCH: CONTENDER: rev=4930 - length=19179
227 227 DBG-DELTAS-SEARCH: CANDIDATE: rev=4799
228 228 DBG-DELTAS-SEARCH: type=snapshot-2
229 229 DBG-DELTAS-SEARCH: size=50213
230 230 DBG-DELTAS-SEARCH: base=4623
231 231 DBG-DELTAS-SEARCH: uncompressed-delta-size=82661
232 232 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
233 233 DBG-DELTAS-SEARCH: DELTA: length=49132 (BAD)
234 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
234 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
235 235 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source storage
236 236 DBG-DELTAS-SEARCH: SEARCH rev=4971
237 DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
237 DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - cached-delta
238 238 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
239 239 DBG-DELTAS-SEARCH: type=snapshot-3
240 240 DBG-DELTAS-SEARCH: size=39228
241 241 DBG-DELTAS-SEARCH: base=4799
242 242 DBG-DELTAS-SEARCH: uncompressed-delta-size=33050
243 243 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
244 244 DBG-DELTAS-SEARCH: DELTA: length=19179 (GOOD)
245 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: search-rounds=1 try-count=1 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
245 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=1 - search-rounds=1 try-count=1 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
246 246 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source p1
247 247 DBG-DELTAS-SEARCH: SEARCH rev=4971
248 248 DBG-DELTAS-SEARCH: ROUND #1 - 2 candidates - search-down
249 249 DBG-DELTAS-SEARCH: CANDIDATE: rev=4962
250 250 DBG-DELTAS-SEARCH: type=snapshot-4
251 251 DBG-DELTAS-SEARCH: size=18296
252 252 DBG-DELTAS-SEARCH: base=4930
253 253 DBG-DELTAS-SEARCH: uncompressed-delta-size=30377
254 254 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
255 255 DBG-DELTAS-SEARCH: DELTA: length=16872 (BAD)
256 256 DBG-DELTAS-SEARCH: CANDIDATE: rev=4971
257 257 DBG-DELTAS-SEARCH: type=snapshot-4
258 258 DBG-DELTAS-SEARCH: size=19179
259 259 DBG-DELTAS-SEARCH: base=4930
260 260 DBG-DELTAS-SEARCH: TOO-HIGH
261 261 DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
262 262 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
263 263 DBG-DELTAS-SEARCH: type=snapshot-3
264 264 DBG-DELTAS-SEARCH: size=39228
265 265 DBG-DELTAS-SEARCH: base=4799
266 266 DBG-DELTAS-SEARCH: uncompressed-delta-size=33050
267 267 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
268 268 DBG-DELTAS-SEARCH: DELTA: length=19179 (GOOD)
269 269 DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
270 270 DBG-DELTAS-SEARCH: CONTENDER: rev=4930 - length=19179
271 271 DBG-DELTAS-SEARCH: CANDIDATE: rev=4799
272 272 DBG-DELTAS-SEARCH: type=snapshot-2
273 273 DBG-DELTAS-SEARCH: size=50213
274 274 DBG-DELTAS-SEARCH: base=4623
275 275 DBG-DELTAS-SEARCH: uncompressed-delta-size=82661
276 276 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
277 277 DBG-DELTAS-SEARCH: DELTA: length=49132 (BAD)
278 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
278 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
279 279 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source p2
280 280 DBG-DELTAS-SEARCH: SEARCH rev=4971
281 281 DBG-DELTAS-SEARCH: ROUND #1 - 2 candidates - search-down
282 282 DBG-DELTAS-SEARCH: CANDIDATE: rev=4962
283 283 DBG-DELTAS-SEARCH: type=snapshot-4
284 284 DBG-DELTAS-SEARCH: size=18296
285 285 DBG-DELTAS-SEARCH: base=4930
286 286 DBG-DELTAS-SEARCH: uncompressed-delta-size=30377
287 287 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
288 288 DBG-DELTAS-SEARCH: DELTA: length=16872 (BAD)
289 289 DBG-DELTAS-SEARCH: CANDIDATE: rev=4971
290 290 DBG-DELTAS-SEARCH: type=snapshot-4
291 291 DBG-DELTAS-SEARCH: size=19179
292 292 DBG-DELTAS-SEARCH: base=4930
293 293 DBG-DELTAS-SEARCH: TOO-HIGH
294 294 DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
295 295 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
296 296 DBG-DELTAS-SEARCH: type=snapshot-3
297 297 DBG-DELTAS-SEARCH: size=39228
298 298 DBG-DELTAS-SEARCH: base=4799
299 299 DBG-DELTAS-SEARCH: uncompressed-delta-size=33050
300 300 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
301 301 DBG-DELTAS-SEARCH: DELTA: length=19179 (GOOD)
302 302 DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
303 303 DBG-DELTAS-SEARCH: CONTENDER: rev=4930 - length=19179
304 304 DBG-DELTAS-SEARCH: CANDIDATE: rev=4799
305 305 DBG-DELTAS-SEARCH: type=snapshot-2
306 306 DBG-DELTAS-SEARCH: size=50213
307 307 DBG-DELTAS-SEARCH: base=4623
308 308 DBG-DELTAS-SEARCH: uncompressed-delta-size=82661
309 309 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
310 310 DBG-DELTAS-SEARCH: DELTA: length=49132 (BAD)
311 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
311 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
312 312 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source prev
313 313 DBG-DELTAS-SEARCH: SEARCH rev=4971
314 314 DBG-DELTAS-SEARCH: ROUND #1 - 2 candidates - search-down
315 315 DBG-DELTAS-SEARCH: CANDIDATE: rev=4962
316 316 DBG-DELTAS-SEARCH: type=snapshot-4
317 317 DBG-DELTAS-SEARCH: size=18296
318 318 DBG-DELTAS-SEARCH: base=4930
319 319 DBG-DELTAS-SEARCH: uncompressed-delta-size=30377
320 320 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
321 321 DBG-DELTAS-SEARCH: DELTA: length=16872 (BAD)
322 322 DBG-DELTAS-SEARCH: CANDIDATE: rev=4971
323 323 DBG-DELTAS-SEARCH: type=snapshot-4
324 324 DBG-DELTAS-SEARCH: size=19179
325 325 DBG-DELTAS-SEARCH: base=4930
326 326 DBG-DELTAS-SEARCH: TOO-HIGH
327 327 DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
328 328 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
329 329 DBG-DELTAS-SEARCH: type=snapshot-3
330 330 DBG-DELTAS-SEARCH: size=39228
331 331 DBG-DELTAS-SEARCH: base=4799
332 332 DBG-DELTAS-SEARCH: uncompressed-delta-size=33050
333 333 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
334 334 DBG-DELTAS-SEARCH: DELTA: length=19179 (GOOD)
335 335 DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
336 336 DBG-DELTAS-SEARCH: CONTENDER: rev=4930 - length=19179
337 337 DBG-DELTAS-SEARCH: CANDIDATE: rev=4799
338 338 DBG-DELTAS-SEARCH: type=snapshot-2
339 339 DBG-DELTAS-SEARCH: size=50213
340 340 DBG-DELTAS-SEARCH: base=4623
341 341 DBG-DELTAS-SEARCH: uncompressed-delta-size=82661
342 342 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
343 343 DBG-DELTAS-SEARCH: DELTA: length=49132 (BAD)
344 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
344 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
345 345
346 346 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now