##// END OF EJS Templates
revlog: fix pure python slicing test when chain contains nullrev...
Boris Feld -
r41110:88d813cd default
parent child Browse files
Show More
@@ -1,1000 +1,1012 b''
1 # revlogdeltas.py - Logic around delta computation for revlog
1 # revlogdeltas.py - Logic around delta computation for revlog
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2018 Octobus <contact@octobus.net>
4 # Copyright 2018 Octobus <contact@octobus.net>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8 """Helper class to compute deltas stored inside revlogs"""
8 """Helper class to compute deltas stored inside revlogs"""
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import collections
12 import collections
13 import struct
13 import struct
14
14
15 # import stuff from node for others to import from revlog
15 # import stuff from node for others to import from revlog
16 from ..node import (
16 from ..node import (
17 nullrev,
17 nullrev,
18 )
18 )
19 from ..i18n import _
19 from ..i18n import _
20
20
21 from .constants import (
21 from .constants import (
22 REVIDX_ISCENSORED,
22 REVIDX_ISCENSORED,
23 REVIDX_RAWTEXT_CHANGING_FLAGS,
23 REVIDX_RAWTEXT_CHANGING_FLAGS,
24 )
24 )
25
25
26 from ..thirdparty import (
26 from ..thirdparty import (
27 attr,
27 attr,
28 )
28 )
29
29
30 from .. import (
30 from .. import (
31 error,
31 error,
32 mdiff,
32 mdiff,
33 )
33 )
34
34
35 # maximum <delta-chain-data>/<revision-text-length> ratio
35 # maximum <delta-chain-data>/<revision-text-length> ratio
36 LIMIT_DELTA2TEXT = 2
36 LIMIT_DELTA2TEXT = 2
37
37
class _testrevlog(object):
    """minimalist fake revlog to use in doctests"""

    def __init__(self, data, density=0.5, mingap=0, snapshot=()):
        """data is an list of revision payload boundaries"""
        # cumulative end offsets, one per revision
        self._data = data
        self._srdensitythreshold = density
        self._srmingapsize = mingap
        # set of revisions to report as snapshots
        self._snapshot = set(snapshot)
        self.index = None

    def start(self, rev):
        # nullrev occupies no space, like an empty revision at offset 0
        if rev == nullrev:
            return 0
        if rev == 0:
            return 0
        return self._data[rev - 1]

    def end(self, rev):
        # nullrev occupies no space
        if rev == nullrev:
            return 0
        return self._data[rev]

    def length(self, rev):
        return self.end(rev) - self.start(rev)

    def __len__(self):
        return len(self._data)

    def issnapshot(self, rev):
        # nullrev is the root of every chain, so it counts as a snapshot
        if rev == nullrev:
            return True
        return rev in self._snapshot
def slicechunk(revlog, revs, targetsize=None):
    """slice revs to reduce the amount of unrelated data to be read from disk.

    ``revs`` is sliced into groups that should be read in one time.
    Assume that revs are sorted.

    The initial chunk is sliced until the overall density (payload/chunks-span
    ratio) is above `revlog._srdensitythreshold`. No gap smaller than
    `revlog._srmingapsize` is skipped.

    If `targetsize` is set, no chunk larger than `targetsize` will be yield.
    For consistency with other slicing choice, this limit won't go lower than
    `revlog._srmingapsize`.

    If individual revisions chunk are larger than this limit, they will still
    be raised individually.

    >>> data = [
    ...  5, #00 (5)
    ...  10, #01 (5)
    ...  12, #02 (2)
    ...  12, #03 (empty)
    ...  27, #04 (15)
    ...  31, #05 (4)
    ...  31, #06 (empty)
    ...  42, #07 (11)
    ...  47, #08 (5)
    ...  47, #09 (empty)
    ...  48, #10 (1)
    ...  51, #11 (3)
    ...  74, #12 (23)
    ...  85, #13 (11)
    ...  86, #14 (1)
    ...  91, #15 (5)
    ... ]
    >>> revlog = _testrevlog(data, snapshot=range(16))

    >>> list(slicechunk(revlog, list(range(16))))
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
    >>> list(slicechunk(revlog, [0, 15]))
    [[0], [15]]
    >>> list(slicechunk(revlog, [0, 11, 15]))
    [[0], [11], [15]]
    >>> list(slicechunk(revlog, [0, 11, 13, 15]))
    [[0], [11, 13, 15]]
    >>> list(slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
    [[1, 2], [5, 8, 10, 11], [14]]

    Slicing with a maximum chunk size
    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
    [[0], [11], [13], [15]]
    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
    [[0], [11], [13, 15]]

    Slicing involving nullrev
    >>> list(slicechunk(revlog, [-1, 0, 11, 13, 15], targetsize=20))
    [[-1, 0], [11], [13, 15]]
    >>> list(slicechunk(revlog, [-1, 13, 15], targetsize=5))
    [[-1], [13], [15]]
    """
    if targetsize is not None:
        targetsize = max(targetsize, revlog._srmingapsize)
    # targetsize should not be specified when evaluating delta candidates:
    # * targetsize is used to ensure we stay within specification when reading,
    # prefer the native (index-provided) density slicing when available
    densityslicing = getattr(revlog.index, 'slicechunktodensity', None)
    if densityslicing is None:
        densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z)
    # first slice by density, then enforce the maximum chunk size on each
    # resulting chunk
    for chunk in densityslicing(revs,
                                revlog._srdensitythreshold,
                                revlog._srmingapsize):
        for subchunk in _slicechunktosize(revlog, chunk, targetsize):
            yield subchunk
132
144
def _slicechunktosize(revlog, revs, targetsize=None):
    """slice revs to match the target size

    This is intended to be used on chunk that density slicing selected by that
    are still too large compared to the read garantee of revlog. This might
    happens when "minimal gap size" interrupted the slicing or when chain are
    built in a way that create large blocks next to each other.

    >>> data = [
    ...  3,  #0 (3)
    ...  5,  #1 (2)
    ...  6,  #2 (1)
    ...  8,  #3 (2)
    ...  8,  #4 (empty)
    ...  11, #5 (3)
    ...  12, #6 (1)
    ...  13, #7 (1)
    ...  14, #8 (1)
    ... ]

    == All snapshots cases ==
    >>> revlog = _testrevlog(data, snapshot=range(9))

    Cases where chunk is already small enough
    >>> list(_slicechunktosize(revlog, [0], 3))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], 3))
    [[6, 7]]
    >>> list(_slicechunktosize(revlog, [0], None))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], None))
    [[6, 7]]

    cases where we need actual slicing
    >>> list(_slicechunktosize(revlog, [0, 1], 3))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 3))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
    [[1, 2], [3]]
    >>> list(_slicechunktosize(revlog, [3, 5], 3))
    [[3], [5]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
    [[3], [5]]
    >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
    [[5], [6, 7, 8]]
    >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
    [[0], [1, 2], [3], [5], [6, 7, 8]]

    Case with too large individual chunk (must return valid chunk)
    >>> list(_slicechunktosize(revlog, [0, 1], 2))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 1))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
    [[3], [5]]

    == No Snapshot cases ==
    >>> revlog = _testrevlog(data)

    Cases where chunk is already small enough
    >>> list(_slicechunktosize(revlog, [0], 3))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], 3))
    [[6, 7]]
    >>> list(_slicechunktosize(revlog, [0], None))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], None))
    [[6, 7]]

    cases where we need actual slicing
    >>> list(_slicechunktosize(revlog, [0, 1], 3))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 3))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
    [[1], [2, 3]]
    >>> list(_slicechunktosize(revlog, [3, 5], 3))
    [[3], [5]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
    [[3], [4, 5]]
    >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
    [[5], [6, 7, 8]]
    >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
    [[0], [1, 2], [3], [5], [6, 7, 8]]

    Case with too large individual chunk (must return valid chunk)
    >>> list(_slicechunktosize(revlog, [0, 1], 2))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 1))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
    [[3], [5]]

    == mixed case ==
    >>> revlog = _testrevlog(data, snapshot=[0, 1, 2])
    >>> list(_slicechunktosize(revlog, list(range(9)), 5))
    [[0, 1], [2], [3, 4, 5], [6, 7, 8]]
    """
    assert targetsize is None or 0 <= targetsize
    startdata = revlog.start(revs[0])
    enddata = revlog.end(revs[-1])
    fullspan = enddata - startdata
    # the whole span already fits: nothing to slice
    if targetsize is None or fullspan <= targetsize:
        yield revs
        return

    startrevidx = 0
    endrevidx = 1
    iterrevs = enumerate(revs)
    next(iterrevs) # skip first rev.
    # first step: get snapshots out of the way
    for idx, r in iterrevs:
        span = revlog.end(r) - startdata
        snapshot = revlog.issnapshot(r)
        if span <= targetsize and snapshot:
            # snapshot still fits in the current chunk: extend it
            endrevidx = idx + 1
        else:
            chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
            if chunk:
                yield chunk
            startrevidx = idx
            startdata = revlog.start(r)
            endrevidx = idx + 1
        if not snapshot:
            break

    # for the others, we use binary slicing to quickly converge toward valid
    # chunks (otherwise, we might end up looking for start/end of many
    # revisions). This logic is not looking for the perfect slicing point, it
    # focuses on quickly converging toward valid chunks.
    nbitem = len(revs)
    while (enddata - startdata) > targetsize:
        endrevidx = nbitem
        if nbitem - startrevidx <= 1:
            break # protect against individual chunk larger than limit
        localenddata = revlog.end(revs[endrevidx - 1])
        span = localenddata - startdata
        while span > targetsize:
            if endrevidx - startrevidx <= 1:
                break # protect against individual chunk larger than limit
            # halve the candidate range until the span fits
            endrevidx -= (endrevidx - startrevidx) // 2
            localenddata = revlog.end(revs[endrevidx - 1])
            span = localenddata - startdata
        chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
        if chunk:
            yield chunk
        startrevidx = endrevidx
        startdata = revlog.start(revs[startrevidx])

    # emit whatever remains after the last cut
    chunk = _trimchunk(revlog, revs, startrevidx)
    if chunk:
        yield chunk
286
298
def _slicechunktodensity(revlog, revs, targetdensity=0.5,
                         mingapsize=0):
    """slice revs to reduce the amount of unrelated data to be read from disk.

    ``revs`` is sliced into groups that should be read in one time.
    Assume that revs are sorted.

    The initial chunk is sliced until the overall density (payload/chunks-span
    ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
    skipped.

    >>> revlog = _testrevlog([
    ...  5, #00 (5)
    ...  10, #01 (5)
    ...  12, #02 (2)
    ...  12, #03 (empty)
    ...  27, #04 (15)
    ...  31, #05 (4)
    ...  31, #06 (empty)
    ...  42, #07 (11)
    ...  47, #08 (5)
    ...  47, #09 (empty)
    ...  48, #10 (1)
    ...  51, #11 (3)
    ...  74, #12 (23)
    ...  85, #13 (11)
    ...  86, #14 (1)
    ...  91, #15 (5)
    ... ])

    >>> list(_slicechunktodensity(revlog, list(range(16))))
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
    >>> list(_slicechunktodensity(revlog, [0, 15]))
    [[0], [15]]
    >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
    [[0], [11], [15]]
    >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
    [[0], [11, 13, 15]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
    [[1, 2], [5, 8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           mingapsize=20))
    [[1, 2, 3, 5, 8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           targetdensity=0.95))
    [[1, 2], [5], [8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           targetdensity=0.95, mingapsize=12))
    [[1, 2], [5, 8, 10, 11], [14]]
    """
    start = revlog.start
    length = revlog.length

    # a single revision cannot be split further
    if len(revs) <= 1:
        yield revs
        return

    deltachainspan = segmentspan(revlog, revs)

    # span too small to be worth splitting
    if deltachainspan < mingapsize:
        yield revs
        return

    readdata = deltachainspan
    chainpayload = sum(length(r) for r in revs)

    if deltachainspan:
        density = chainpayload / float(deltachainspan)
    else:
        density = 1.0

    # already dense enough: read everything in one go
    if density >= targetdensity:
        yield revs
        return

    # Collect the gaps between consecutive non-empty revisions. The list is
    # sorted by increasing size so that `pop()` returns the largest gap first.
    gaps = []
    prevend = None
    for i, rev in enumerate(revs):
        revstart = start(rev)
        revlen = length(rev)

        # Skip empty revisions to form larger holes
        if revlen == 0:
            continue

        if prevend is not None:
            gapsize = revstart - prevend
            # only consider holes that are large enough
            if gapsize > mingapsize:
                gaps.append((gapsize, i))

        prevend = revstart + revlen
    # sort the gaps to pop them from largest to small
    gaps.sort()

    # Collect the indices of the largest holes until the density is acceptable
    selected = []
    while gaps and density < targetdensity:
        gapsize, gapidx = gaps.pop()

        selected.append(gapidx)

        # skipping this gap means that much less data is actually read
        readdata -= gapsize
        if readdata > 0:
            density = chainpayload / float(readdata)
        else:
            density = 1.0
    selected.sort()

    # Cut the revs at collected indices
    previdx = 0
    for idx in selected:

        chunk = _trimchunk(revlog, revs, previdx, idx)
        if chunk:
            yield chunk

        previdx = idx

    chunk = _trimchunk(revlog, revs, previdx)
    if chunk:
        yield chunk
412
424
413 def _trimchunk(revlog, revs, startidx, endidx=None):
425 def _trimchunk(revlog, revs, startidx, endidx=None):
414 """returns revs[startidx:endidx] without empty trailing revs
426 """returns revs[startidx:endidx] without empty trailing revs
415
427
416 Doctest Setup
428 Doctest Setup
417 >>> revlog = _testrevlog([
429 >>> revlog = _testrevlog([
418 ... 5, #0
430 ... 5, #0
419 ... 10, #1
431 ... 10, #1
420 ... 12, #2
432 ... 12, #2
421 ... 12, #3 (empty)
433 ... 12, #3 (empty)
422 ... 17, #4
434 ... 17, #4
423 ... 21, #5
435 ... 21, #5
424 ... 21, #6 (empty)
436 ... 21, #6 (empty)
425 ... ])
437 ... ])
426
438
427 Contiguous cases:
439 Contiguous cases:
428 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
440 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
429 [0, 1, 2, 3, 4, 5]
441 [0, 1, 2, 3, 4, 5]
430 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
442 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
431 [0, 1, 2, 3, 4]
443 [0, 1, 2, 3, 4]
432 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
444 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
433 [0, 1, 2]
445 [0, 1, 2]
434 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
446 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
435 [2]
447 [2]
436 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
448 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
437 [3, 4, 5]
449 [3, 4, 5]
438 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
450 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
439 [3, 4]
451 [3, 4]
440
452
441 Discontiguous cases:
453 Discontiguous cases:
442 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
454 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
443 [1, 3, 5]
455 [1, 3, 5]
444 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
456 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
445 [1]
457 [1]
446 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
458 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
447 [3, 5]
459 [3, 5]
448 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
460 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
449 [3, 5]
461 [3, 5]
450 """
462 """
451 length = revlog.length
463 length = revlog.length
452
464
453 if endidx is None:
465 if endidx is None:
454 endidx = len(revs)
466 endidx = len(revs)
455
467
456 # If we have a non-emtpy delta candidate, there are nothing to trim
468 # If we have a non-emtpy delta candidate, there are nothing to trim
457 if revs[endidx - 1] < len(revlog):
469 if revs[endidx - 1] < len(revlog):
458 # Trim empty revs at the end, except the very first revision of a chain
470 # Trim empty revs at the end, except the very first revision of a chain
459 while (endidx > 1
471 while (endidx > 1
460 and endidx > startidx
472 and endidx > startidx
461 and length(revs[endidx - 1]) == 0):
473 and length(revs[endidx - 1]) == 0):
462 endidx -= 1
474 endidx -= 1
463
475
464 return revs[startidx:endidx]
476 return revs[startidx:endidx]
465
477
def segmentspan(revlog, revs):
    """Get the byte span of a segment of revisions

    revs is a sorted array of revision numbers

    >>> revlog = _testrevlog([
    ...  5,  #0
    ...  10, #1
    ...  12, #2
    ...  12, #3 (empty)
    ...  17, #4
    ... ])

    >>> segmentspan(revlog, [0, 1, 2, 3, 4])
    17
    >>> segmentspan(revlog, [0, 4])
    17
    >>> segmentspan(revlog, [3, 4])
    5
    >>> segmentspan(revlog, [1, 2, 3,])
    7
    >>> segmentspan(revlog, [1, 3])
    7
    """
    # an empty segment occupies no bytes
    if not revs:
        return 0
    end = revlog.end(revs[-1])
    # span covers everything from the start of the first revision to the end
    # of the last one, including any gaps in between
    return end - revlog.start(revs[0])
494
506
def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
    """build full text from a (base, delta) pair and other metadata

    ``fh`` is an open file handle passed through to revision reading,
    ``baserev`` the revision the delta applies to, ``p1``/``p2`` the parent
    nodes used for hash validation and ``expectednode`` the node the result
    must hash to. Raises a storage error when a revision flagged as censored
    turns out not to be, and re-raises censorship errors unless the censored
    index flag is set.
    """
    # special case deltas which replace entire base; no need to decode
    # base revision. this neatly avoids censored bases, which throw when
    # they're decoded.
    hlen = struct.calcsize(">lll")
    if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
                                               len(delta) - hlen):
        fulltext = delta[hlen:]
    else:
        # deltabase is rawtext before changed by flag processors, which is
        # equivalent to non-raw text
        basetext = revlog.revision(baserev, _df=fh, raw=False)
        fulltext = mdiff.patch(basetext, delta)

    try:
        res = revlog._processflags(fulltext, flags, 'read', raw=True)
        fulltext, validatehash = res
        if validatehash:
            revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
        if flags & REVIDX_ISCENSORED:
            raise error.StorageError(_('node %s is not censored') %
                                     expectednode)
    except error.CensoredNodeError:
        # must pass the censored index flag to add censored revisions
        if not flags & REVIDX_ISCENSORED:
            raise
    return fulltext
523
535
@attr.s(slots=True, frozen=True)
class _deltainfo(object):
    """Immutable record describing one candidate delta for a revision."""
    # distance from the base revision (read-span cost; see isgooddeltainfo)
    distance = attr.ib()
    # length of the delta itself
    deltalen = attr.ib()
    # the delta payload — presumably the chunks to be stored; confirm at callers
    data = attr.ib()
    # revision the delta applies against
    base = attr.ib()
    # base revision of the whole delta chain
    chainbase = attr.ib()
    # number of deltas in the chain
    chainlen = attr.ib()
    # sum of the total size of deltas needed to rebuild the text (CPU cost)
    compresseddeltalen = attr.ib()
    # snapshot depth, or None — semantics defined where snapshots are built
    snapshotdepth = attr.ib()
534
546
def isgooddeltainfo(revlog, deltainfo, revinfo):
    """Returns True if the given delta is good.

    Good means that it is within the disk span, disk size, and chain
    length bounds that we know to be performant.
    """
    if deltainfo is None:
        return False

    # - 'deltainfo.distance' is the distance from the base revision --
    #   bounding it limits the amount of I/O we need to do.
    # - 'deltainfo.compresseddeltalen' is the sum of the total size of
    #   deltas we need to apply -- bounding it limits the amount of CPU
    #   we consume.

    fulltextlen = revinfo.textlen
    spanlimit = revlog._maxdeltachainspan
    if not spanlimit:
        # no configured limit: make the distance check below always pass
        spanlimit = deltainfo.distance
    spanlimit = max(spanlimit, fulltextlen * 4)

    # Bad delta from read span:
    #
    # The span of data read is larger than the maximum allowed.
    #
    # In the sparse-revlog case, we rely on the associated "sparse
    # reading" to avoid issues related to the span of data. In theory, it
    # would be possible to build a pathological revlog where the delta
    # pattern would lead to too many reads. However, they do not happen
    # in practice at all. So we skip the span check entirely.
    if not revlog._sparserevlog and spanlimit < deltainfo.distance:
        return False

    # Bad delta from new delta size:
    #
    # A delta larger than the target text is inefficient storage.
    if fulltextlen < deltainfo.deltalen:
        return False

    # Bad delta from cumulated payload size:
    #
    # The sum of the deltas grew larger than K * target text length.
    if fulltextlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
        return False

    # Bad delta from chain length:
    #
    # The number of deltas in the chain got too high.
    maxchainlen = revlog._maxchainlen
    if maxchainlen and maxchainlen < deltainfo.chainlen:
        return False

    # Bad delta from intermediate snapshot size limit:
    #
    # An intermediate snapshot size above the limit. The limit exists to
    # prevent an endless chain of intermediate deltas from being created.
    depth = deltainfo.snapshotdepth
    if depth is not None and (fulltextlen >> depth) < deltainfo.deltalen:
        return False

    # Bad delta if the new intermediate snapshot is larger than the
    # previous snapshot.
    if depth and revlog.length(deltainfo.base) < deltainfo.deltalen:
        return False

    return True
603
615
# When a revision's full text is that many times bigger than a candidate
# base's full text, the candidate is very unlikely to produce a valid
# delta; such candidates are no longer considered.
LIMIT_BASE2TEXT = 500
608
620
609 def _candidategroups(revlog, textlen, p1, p2, cachedelta):
621 def _candidategroups(revlog, textlen, p1, p2, cachedelta):
610 """Provides group of revision to be tested as delta base
622 """Provides group of revision to be tested as delta base
611
623
612 This top level function focus on emitting groups with unique and worthwhile
624 This top level function focus on emitting groups with unique and worthwhile
613 content. See _raw_candidate_groups for details about the group order.
625 content. See _raw_candidate_groups for details about the group order.
614 """
626 """
615 # should we try to build a delta?
627 # should we try to build a delta?
616 if not (len(revlog) and revlog._storedeltachains):
628 if not (len(revlog) and revlog._storedeltachains):
617 yield None
629 yield None
618 return
630 return
619
631
620 deltalength = revlog.length
632 deltalength = revlog.length
621 deltaparent = revlog.deltaparent
633 deltaparent = revlog.deltaparent
622 sparse = revlog._sparserevlog
634 sparse = revlog._sparserevlog
623 good = None
635 good = None
624
636
625 deltas_limit = textlen * LIMIT_DELTA2TEXT
637 deltas_limit = textlen * LIMIT_DELTA2TEXT
626
638
627 tested = set([nullrev])
639 tested = set([nullrev])
628 candidates = _refinedgroups(revlog, p1, p2, cachedelta)
640 candidates = _refinedgroups(revlog, p1, p2, cachedelta)
629 while True:
641 while True:
630 temptative = candidates.send(good)
642 temptative = candidates.send(good)
631 if temptative is None:
643 if temptative is None:
632 break
644 break
633 group = []
645 group = []
634 for rev in temptative:
646 for rev in temptative:
635 # skip over empty delta (no need to include them in a chain)
647 # skip over empty delta (no need to include them in a chain)
636 while (revlog._generaldelta
648 while (revlog._generaldelta
637 and not (rev == nullrev
649 and not (rev == nullrev
638 or rev in tested
650 or rev in tested
639 or deltalength(rev))):
651 or deltalength(rev))):
640 tested.add(rev)
652 tested.add(rev)
641 rev = deltaparent(rev)
653 rev = deltaparent(rev)
642 # no need to try a delta against nullrev, this will be done as a
654 # no need to try a delta against nullrev, this will be done as a
643 # last resort.
655 # last resort.
644 if rev == nullrev:
656 if rev == nullrev:
645 continue
657 continue
646 # filter out revision we tested already
658 # filter out revision we tested already
647 if rev in tested:
659 if rev in tested:
648 continue
660 continue
649 tested.add(rev)
661 tested.add(rev)
650 # filter out delta base that will never produce good delta
662 # filter out delta base that will never produce good delta
651 if deltas_limit < revlog.length(rev):
663 if deltas_limit < revlog.length(rev):
652 continue
664 continue
653 if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
665 if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
654 continue
666 continue
655 # no delta for rawtext-changing revs (see "candelta" for why)
667 # no delta for rawtext-changing revs (see "candelta" for why)
656 if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
668 if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
657 continue
669 continue
658 # If we reach here, we are about to build and test a delta.
670 # If we reach here, we are about to build and test a delta.
659 # The delta building process will compute the chaininfo in all
671 # The delta building process will compute the chaininfo in all
660 # case, since that computation is cached, it is fine to access it
672 # case, since that computation is cached, it is fine to access it
661 # here too.
673 # here too.
662 chainlen, chainsize = revlog._chaininfo(rev)
674 chainlen, chainsize = revlog._chaininfo(rev)
663 # if chain will be too long, skip base
675 # if chain will be too long, skip base
664 if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
676 if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
665 continue
677 continue
666 # if chain already have too much data, skip base
678 # if chain already have too much data, skip base
667 if deltas_limit < chainsize:
679 if deltas_limit < chainsize:
668 continue
680 continue
669 group.append(rev)
681 group.append(rev)
670 if group:
682 if group:
671 # XXX: in the sparse revlog case, group can become large,
683 # XXX: in the sparse revlog case, group can become large,
672 # impacting performances. Some bounding or slicing mecanism
684 # impacting performances. Some bounding or slicing mecanism
673 # would help to reduce this impact.
685 # would help to reduce this impact.
674 good = yield tuple(group)
686 good = yield tuple(group)
675 yield None
687 yield None
676
688
677 def _findsnapshots(revlog, cache, start_rev):
689 def _findsnapshots(revlog, cache, start_rev):
678 """find snapshot from start_rev to tip"""
690 """find snapshot from start_rev to tip"""
679 deltaparent = revlog.deltaparent
691 deltaparent = revlog.deltaparent
680 issnapshot = revlog.issnapshot
692 issnapshot = revlog.issnapshot
681 for rev in revlog.revs(start_rev):
693 for rev in revlog.revs(start_rev):
682 if issnapshot(rev):
694 if issnapshot(rev):
683 cache[deltaparent(rev)].append(rev)
695 cache[deltaparent(rev)].append(rev)
684
696
685 def _refinedgroups(revlog, p1, p2, cachedelta):
697 def _refinedgroups(revlog, p1, p2, cachedelta):
686 good = None
698 good = None
687 # First we try to reuse a the delta contained in the bundle.
699 # First we try to reuse a the delta contained in the bundle.
688 # (or from the source revlog)
700 # (or from the source revlog)
689 #
701 #
690 # This logic only applies to general delta repositories and can be disabled
702 # This logic only applies to general delta repositories and can be disabled
691 # through configuration. Disabling reuse source delta is useful when
703 # through configuration. Disabling reuse source delta is useful when
692 # we want to make sure we recomputed "optimal" deltas.
704 # we want to make sure we recomputed "optimal" deltas.
693 if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
705 if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
694 # Assume what we received from the server is a good choice
706 # Assume what we received from the server is a good choice
695 # build delta will reuse the cache
707 # build delta will reuse the cache
696 good = yield (cachedelta[0],)
708 good = yield (cachedelta[0],)
697 if good is not None:
709 if good is not None:
698 yield None
710 yield None
699 return
711 return
700 for candidates in _rawgroups(revlog, p1, p2, cachedelta):
712 for candidates in _rawgroups(revlog, p1, p2, cachedelta):
701 good = yield candidates
713 good = yield candidates
702 if good is not None:
714 if good is not None:
703 break
715 break
704
716
705 # If sparse revlog is enabled, we can try to refine the available deltas
717 # If sparse revlog is enabled, we can try to refine the available deltas
706 if not revlog._sparserevlog:
718 if not revlog._sparserevlog:
707 yield None
719 yield None
708 return
720 return
709
721
710 # if we have a refinable value, try to refine it
722 # if we have a refinable value, try to refine it
711 if good is not None and good not in (p1, p2) and revlog.issnapshot(good):
723 if good is not None and good not in (p1, p2) and revlog.issnapshot(good):
712 # refine snapshot down
724 # refine snapshot down
713 previous = None
725 previous = None
714 while previous != good:
726 while previous != good:
715 previous = good
727 previous = good
716 base = revlog.deltaparent(good)
728 base = revlog.deltaparent(good)
717 if base == nullrev:
729 if base == nullrev:
718 break
730 break
719 good = yield (base,)
731 good = yield (base,)
720 # refine snapshot up
732 # refine snapshot up
721 #
733 #
722 # XXX the _findsnapshots call can be expensive and is "duplicated" with
734 # XXX the _findsnapshots call can be expensive and is "duplicated" with
723 # the one done in `_rawgroups`. Once we start working on performance,
735 # the one done in `_rawgroups`. Once we start working on performance,
724 # we should make the two logics share this computation.
736 # we should make the two logics share this computation.
725 snapshots = collections.defaultdict(list)
737 snapshots = collections.defaultdict(list)
726 _findsnapshots(revlog, snapshots, good + 1)
738 _findsnapshots(revlog, snapshots, good + 1)
727 previous = None
739 previous = None
728 while good != previous:
740 while good != previous:
729 previous = good
741 previous = good
730 children = tuple(sorted(c for c in snapshots[good]))
742 children = tuple(sorted(c for c in snapshots[good]))
731 good = yield children
743 good = yield children
732
744
733 # we have found nothing
745 # we have found nothing
734 yield None
746 yield None
735
747
736 def _rawgroups(revlog, p1, p2, cachedelta):
748 def _rawgroups(revlog, p1, p2, cachedelta):
737 """Provides group of revision to be tested as delta base
749 """Provides group of revision to be tested as delta base
738
750
739 This lower level function focus on emitting delta theorically interresting
751 This lower level function focus on emitting delta theorically interresting
740 without looking it any practical details.
752 without looking it any practical details.
741
753
742 The group order aims at providing fast or small candidates first.
754 The group order aims at providing fast or small candidates first.
743 """
755 """
744 gdelta = revlog._generaldelta
756 gdelta = revlog._generaldelta
745 sparse = revlog._sparserevlog
757 sparse = revlog._sparserevlog
746 curr = len(revlog)
758 curr = len(revlog)
747 prev = curr - 1
759 prev = curr - 1
748 deltachain = lambda rev: revlog._deltachain(rev)[0]
760 deltachain = lambda rev: revlog._deltachain(rev)[0]
749
761
750 if gdelta:
762 if gdelta:
751 # exclude already lazy tested base if any
763 # exclude already lazy tested base if any
752 parents = [p for p in (p1, p2) if p != nullrev]
764 parents = [p for p in (p1, p2) if p != nullrev]
753
765
754 if not revlog._deltabothparents and len(parents) == 2:
766 if not revlog._deltabothparents and len(parents) == 2:
755 parents.sort()
767 parents.sort()
756 # To minimize the chance of having to build a fulltext,
768 # To minimize the chance of having to build a fulltext,
757 # pick first whichever parent is closest to us (max rev)
769 # pick first whichever parent is closest to us (max rev)
758 yield (parents[1],)
770 yield (parents[1],)
759 # then the other one (min rev) if the first did not fit
771 # then the other one (min rev) if the first did not fit
760 yield (parents[0],)
772 yield (parents[0],)
761 elif len(parents) > 0:
773 elif len(parents) > 0:
762 # Test all parents (1 or 2), and keep the best candidate
774 # Test all parents (1 or 2), and keep the best candidate
763 yield parents
775 yield parents
764
776
765 if sparse and parents:
777 if sparse and parents:
766 snapshots = collections.defaultdict(list) # map: base-rev: snapshot-rev
778 snapshots = collections.defaultdict(list) # map: base-rev: snapshot-rev
767 # See if we can use an existing snapshot in the parent chains to use as
779 # See if we can use an existing snapshot in the parent chains to use as
768 # a base for a new intermediate-snapshot
780 # a base for a new intermediate-snapshot
769 #
781 #
770 # search for snapshot in parents delta chain
782 # search for snapshot in parents delta chain
771 # map: snapshot-level: snapshot-rev
783 # map: snapshot-level: snapshot-rev
772 parents_snaps = collections.defaultdict(set)
784 parents_snaps = collections.defaultdict(set)
773 candidate_chains = [deltachain(p) for p in parents]
785 candidate_chains = [deltachain(p) for p in parents]
774 for chain in candidate_chains:
786 for chain in candidate_chains:
775 for idx, s in enumerate(chain):
787 for idx, s in enumerate(chain):
776 if not revlog.issnapshot(s):
788 if not revlog.issnapshot(s):
777 break
789 break
778 parents_snaps[idx].add(s)
790 parents_snaps[idx].add(s)
779 snapfloor = min(parents_snaps[0]) + 1
791 snapfloor = min(parents_snaps[0]) + 1
780 _findsnapshots(revlog, snapshots, snapfloor)
792 _findsnapshots(revlog, snapshots, snapfloor)
781 # search for the highest "unrelated" revision
793 # search for the highest "unrelated" revision
782 #
794 #
783 # Adding snapshots used by "unrelated" revision increase the odd we
795 # Adding snapshots used by "unrelated" revision increase the odd we
784 # reuse an independant, yet better snapshot chain.
796 # reuse an independant, yet better snapshot chain.
785 #
797 #
786 # XXX instead of building a set of revisions, we could lazily enumerate
798 # XXX instead of building a set of revisions, we could lazily enumerate
787 # over the chains. That would be more efficient, however we stick to
799 # over the chains. That would be more efficient, however we stick to
788 # simple code for now.
800 # simple code for now.
789 all_revs = set()
801 all_revs = set()
790 for chain in candidate_chains:
802 for chain in candidate_chains:
791 all_revs.update(chain)
803 all_revs.update(chain)
792 other = None
804 other = None
793 for r in revlog.revs(prev, snapfloor):
805 for r in revlog.revs(prev, snapfloor):
794 if r not in all_revs:
806 if r not in all_revs:
795 other = r
807 other = r
796 break
808 break
797 if other is not None:
809 if other is not None:
798 # To avoid unfair competition, we won't use unrelated intermediate
810 # To avoid unfair competition, we won't use unrelated intermediate
799 # snapshot that are deeper than the ones from the parent delta
811 # snapshot that are deeper than the ones from the parent delta
800 # chain.
812 # chain.
801 max_depth = max(parents_snaps.keys())
813 max_depth = max(parents_snaps.keys())
802 chain = deltachain(other)
814 chain = deltachain(other)
803 for idx, s in enumerate(chain):
815 for idx, s in enumerate(chain):
804 if s < snapfloor:
816 if s < snapfloor:
805 continue
817 continue
806 if max_depth < idx:
818 if max_depth < idx:
807 break
819 break
808 if not revlog.issnapshot(s):
820 if not revlog.issnapshot(s):
809 break
821 break
810 parents_snaps[idx].add(s)
822 parents_snaps[idx].add(s)
811 # Test them as possible intermediate snapshot base
823 # Test them as possible intermediate snapshot base
812 # We test them from highest to lowest level. High level one are more
824 # We test them from highest to lowest level. High level one are more
813 # likely to result in small delta
825 # likely to result in small delta
814 floor = None
826 floor = None
815 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
827 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
816 siblings = set()
828 siblings = set()
817 for s in snaps:
829 for s in snaps:
818 siblings.update(snapshots[s])
830 siblings.update(snapshots[s])
819 # Before considering making a new intermediate snapshot, we check
831 # Before considering making a new intermediate snapshot, we check
820 # if an existing snapshot, children of base we consider, would be
832 # if an existing snapshot, children of base we consider, would be
821 # suitable.
833 # suitable.
822 #
834 #
823 # It give a change to reuse a delta chain "unrelated" to the
835 # It give a change to reuse a delta chain "unrelated" to the
824 # current revision instead of starting our own. Without such
836 # current revision instead of starting our own. Without such
825 # re-use, topological branches would keep reopening new chains.
837 # re-use, topological branches would keep reopening new chains.
826 # Creating more and more snapshot as the repository grow.
838 # Creating more and more snapshot as the repository grow.
827
839
828 if floor is not None:
840 if floor is not None:
829 # We only do this for siblings created after the one in our
841 # We only do this for siblings created after the one in our
830 # parent's delta chain. Those created before has less chances
842 # parent's delta chain. Those created before has less chances
831 # to be valid base since our ancestors had to create a new
843 # to be valid base since our ancestors had to create a new
832 # snapshot.
844 # snapshot.
833 siblings = [r for r in siblings if floor < r]
845 siblings = [r for r in siblings if floor < r]
834 yield tuple(sorted(siblings))
846 yield tuple(sorted(siblings))
835 # then test the base from our parent's delta chain.
847 # then test the base from our parent's delta chain.
836 yield tuple(sorted(snaps))
848 yield tuple(sorted(snaps))
837 floor = min(snaps)
849 floor = min(snaps)
838 # No suitable base found in the parent chain, search if any full
850 # No suitable base found in the parent chain, search if any full
839 # snapshots emitted since parent's base would be a suitable base for an
851 # snapshots emitted since parent's base would be a suitable base for an
840 # intermediate snapshot.
852 # intermediate snapshot.
841 #
853 #
842 # It give a chance to reuse a delta chain unrelated to the current
854 # It give a chance to reuse a delta chain unrelated to the current
843 # revisions instead of starting our own. Without such re-use,
855 # revisions instead of starting our own. Without such re-use,
844 # topological branches would keep reopening new full chains. Creating
856 # topological branches would keep reopening new full chains. Creating
845 # more and more snapshot as the repository grow.
857 # more and more snapshot as the repository grow.
846 yield tuple(snapshots[nullrev])
858 yield tuple(snapshots[nullrev])
847
859
848 if not sparse:
860 if not sparse:
849 # other approach failed try against prev to hopefully save us a
861 # other approach failed try against prev to hopefully save us a
850 # fulltext.
862 # fulltext.
851 yield (prev,)
863 yield (prev,)
852
864
class deltacomputer(object):
    """Compute delta information for revisions about to be stored.

    Wraps a revlog and exposes `finddeltainfo`, which searches for the
    best delta base (or falls back to a full snapshot) for a new
    revision.
    """
    def __init__(self, revlog):
        self.revlog = revlog

    def buildtext(self, revinfo, fh):
        """Builds a fulltext version of a revision

        revinfo: _revisioninfo instance that contains all needed info
        fh:      file handle to either the .i or the .d revlog file,
                 depending on whether it is inlined or not
        """
        btext = revinfo.btext
        if btext[0] is not None:
            # fulltext already computed and cached on the revision info
            return btext[0]

        revlog = self.revlog
        cachedelta = revinfo.cachedelta
        baserev = cachedelta[0]
        delta = cachedelta[1]

        fulltext = btext[0] = _textfromdelta(fh, revlog, baserev, delta,
                                             revinfo.p1, revinfo.p2,
                                             revinfo.flags, revinfo.node)
        return fulltext

    def _builddeltadiff(self, base, revinfo, fh):
        """Return the raw delta bytes from `base` to the new revision."""
        revlog = self.revlog
        t = self.buildtext(revinfo, fh)
        if revlog.iscensored(base):
            # deltas based on a censored revision must replace the
            # full content in one patch, so delta works everywhere
            header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
            delta = header + t
        else:
            ptext = revlog.revision(base, _df=fh, raw=True)
            delta = mdiff.textdiff(ptext, t)

        return delta

    def _builddeltainfo(self, revinfo, base, fh):
        """Build a _deltainfo describing a delta against `base`."""
        # can we use the cached delta?
        delta = None
        if revinfo.cachedelta:
            # check whether the cached diff still applies to `base`:
            # walk down from the cached base through empty deltas
            currentbase = revinfo.cachedelta[0]
            while (currentbase != nullrev
                    and currentbase != base
                    and self.revlog.length(currentbase) == 0):
                currentbase = self.revlog.deltaparent(currentbase)
            if currentbase == base:
                delta = revinfo.cachedelta[1]
        if delta is None:
            delta = self._builddeltadiff(base, revinfo, fh)
        # note: the original version re-assigned `revlog = self.revlog`
        # twice; the duplicate assignment has been dropped.
        revlog = self.revlog
        header, data = revlog.compress(delta)
        deltalen = len(header) + len(data)
        chainbase = revlog.chainbase(base)
        offset = revlog.end(len(revlog) - 1)
        # distance: bytes between the start of the chain base and the end
        # of this new delta
        dist = deltalen + offset - revlog.start(chainbase)
        if revlog._generaldelta:
            deltabase = base
        else:
            deltabase = chainbase
        chainlen, compresseddeltalen = revlog._chaininfo(base)
        chainlen += 1
        compresseddeltalen += deltalen

        snapshotdepth = None
        if deltabase == nullrev:
            snapshotdepth = 0
        elif revlog._sparserevlog and revlog.issnapshot(deltabase):
            # A delta chain should always be one full snapshot,
            # zero or more semi-snapshots, and zero or more deltas
            p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
            # (the issnapshot(deltabase) re-check the original performed
            # here was redundant with the elif guard above)
            if deltabase not in (p1, p2):
                snapshotdepth = len(revlog._deltachain(deltabase)[0])

        return _deltainfo(dist, deltalen, (header, data), deltabase,
                          chainbase, chainlen, compresseddeltalen,
                          snapshotdepth)

    def _fullsnapshotinfo(self, fh, revinfo):
        """Build a _deltainfo storing the revision as a full snapshot."""
        curr = len(self.revlog)
        rawtext = self.buildtext(revinfo, fh)
        data = self.revlog.compress(rawtext)
        compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
        # a full snapshot is its own base and chain of length one
        deltabase = chainbase = curr
        snapshotdepth = 0
        chainlen = 1

        return _deltainfo(dist, deltalen, data, deltabase,
                          chainbase, chainlen, compresseddeltalen,
                          snapshotdepth)

    def finddeltainfo(self, revinfo, fh):
        """Find an acceptable delta against a candidate revision

        revinfo: information about the revision (instance of
                 _revisioninfo)
        fh:      file handle to either the .i or the .d revlog file,
                 depending on whether it is inlined or not

        Returns the first acceptable candidate revision, as ordered by
        _candidategroups

        If no suitable deltabase is found, we return delta info for a
        full snapshot.
        """
        if not revinfo.textlen:
            # empty revision: a delta cannot beat storing it outright
            return self._fullsnapshotinfo(fh, revinfo)

        # no delta for flag processor revision (see "candelta" for why)
        # not calling candelta since only one revision needs testing,
        # also to avoid the overhead of fetching flags again.
        if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
            return self._fullsnapshotinfo(fh, revinfo)

        cachedelta = revinfo.cachedelta
        revlog = self.revlog

        deltainfo = None
        p1r, p2r = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
        groups = _candidategroups(revlog, revinfo.textlen,
                                  p1r, p2r, cachedelta)
        candidaterevs = next(groups)
        while candidaterevs is not None:
            nominateddeltas = []
            if deltainfo is not None:
                # if we already found a good delta,
                # challenge it against refined candidates
                nominateddeltas.append(deltainfo)
            for candidaterev in candidaterevs:
                candidatedelta = self._builddeltainfo(revinfo,
                                                      candidaterev, fh)
                if isgooddeltainfo(revlog, candidatedelta, revinfo):
                    nominateddeltas.append(candidatedelta)
            if nominateddeltas:
                deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
            if deltainfo is not None:
                # feed the chosen base back so the generator can refine
                candidaterevs = groups.send(deltainfo.base)
            else:
                candidaterevs = next(groups)

        if deltainfo is None:
            deltainfo = self._fullsnapshotinfo(fh, revinfo)
        return deltainfo
General Comments 0
You need to be logged in to leave comments. Login now