##// END OF EJS Templates
changelog-delay: move the appender class next to randomaccessfile...
marmoute -
r51996:222b8922 default
parent child Browse files
Show More
@@ -1,644 +1,575 b''
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 from .i18n import _
9 from .i18n import _
10 from .node import (
10 from .node import (
11 bin,
11 bin,
12 hex,
12 hex,
13 )
13 )
14 from .thirdparty import attr
14 from .thirdparty import attr
15
15
16 from . import (
16 from . import (
17 encoding,
17 encoding,
18 error,
18 error,
19 metadata,
19 metadata,
20 pycompat,
20 pycompat,
21 revlog,
21 revlog,
22 )
22 )
23 from .utils import (
23 from .utils import (
24 dateutil,
24 dateutil,
25 stringutil,
25 stringutil,
26 )
26 )
27 from .revlogutils import (
27 from .revlogutils import (
28 constants as revlog_constants,
28 constants as revlog_constants,
29 flagutil,
29 flagutil,
30 randomaccessfile,
30 )
31 )
31
32
32 _defaultextra = {b'branch': b'default'}
33 _defaultextra = {b'branch': b'default'}
33
34
34
35
35 def _string_escape(text):
36 def _string_escape(text):
36 """
37 """
37 >>> from .pycompat import bytechr as chr
38 >>> from .pycompat import bytechr as chr
38 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
39 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
39 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
40 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
40 >>> s
41 >>> s
41 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
42 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
42 >>> res = _string_escape(s)
43 >>> res = _string_escape(s)
43 >>> s == _string_unescape(res)
44 >>> s == _string_unescape(res)
44 True
45 True
45 """
46 """
46 # subset of the string_escape codec
47 # subset of the string_escape codec
47 text = (
48 text = (
48 text.replace(b'\\', b'\\\\')
49 text.replace(b'\\', b'\\\\')
49 .replace(b'\n', b'\\n')
50 .replace(b'\n', b'\\n')
50 .replace(b'\r', b'\\r')
51 .replace(b'\r', b'\\r')
51 )
52 )
52 return text.replace(b'\0', b'\\0')
53 return text.replace(b'\0', b'\\0')
53
54
54
55
def _string_unescape(text):
    """Reverse the transformation performed by ``_string_escape``."""
    if b'\\0' in text:
        # fix up \0 without getting into trouble with \\0: mark every
        # literal double-backslash with a newline, substitute NUL, then
        # drop the markers (real newlines are still escaped as \\n here)
        text = text.replace(b'\\\\', b'\\\\\n')
        text = text.replace(b'\\0', b'\0')
        text = text.replace(b'\n', b'')
    return stringutil.unescapestr(text)
62
63
63
64
def decodeextra(text):
    """Decode a \\0-separated ``key:value`` blob into an 'extra' dict.

    Missing keys fall back to ``_defaultextra`` (a copy, so callers may
    mutate the result safely).

    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for l in text.split(b'\0'):
        if l:
            # values may themselves contain b':', so split only once
            k, v = _string_unescape(l).split(b':', 1)
            extra[k] = v
    return extra
81
82
82
83
def encodeextra(d):
    """Encode an 'extra' dict as a \\0-separated ``key:value`` blob."""
    # keys must be sorted to produce a deterministic changelog entry
    items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
    return b"\0".join(items)
87
88
88
89
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')
92
93
93
94
class appender:
    """the changelog index must be updated last on disk, so we use this class
    to delay writes to it"""

    def __init__(self, vfs, name, mode, buf):
        # ``buf`` accumulates bytes written while the append is delayed;
        # it is shared with the caller so the pending data can be flushed
        # (or discarded) during transaction finalization.
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        # virtual end of file: on-disk size plus all buffered writes
        return self._end

    def tell(self):
        return self.offset

    def flush(self):
        # writes stay buffered until finalization; nothing to flush
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        # only reposition the real file when the virtual offset falls
        # inside the on-disk portion
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = b""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            doff = self.offset - self.size
            # collapse the buffered chunks into a single bytes object so a
            # plain slice can serve the rest of the read
            self.data.insert(0, b"".join(self.data))
            del self.data[1:]
            s = self.data[0][doff : doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)
162
163
164 class _divertopener:
95 class _divertopener:
165 def __init__(self, opener, target):
96 def __init__(self, opener, target):
166 self._opener = opener
97 self._opener = opener
167 self._target = target
98 self._target = target
168
99
169 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
100 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
170 if name != self._target:
101 if name != self._target:
171 return self._opener(name, mode, **kwargs)
102 return self._opener(name, mode, **kwargs)
172 return self._opener(name + b".a", mode, **kwargs)
103 return self._opener(name + b".a", mode, **kwargs)
173
104
174 def __getattr__(self, attr):
105 def __getattr__(self, attr):
175 return getattr(self._opener, attr)
106 return getattr(self._opener, attr)
176
107
177
108
178 class _delayopener:
109 class _delayopener:
179 """build an opener that stores chunks in 'buf' instead of 'target'"""
110 """build an opener that stores chunks in 'buf' instead of 'target'"""
180
111
181 def __init__(self, opener, target, buf):
112 def __init__(self, opener, target, buf):
182 self._opener = opener
113 self._opener = opener
183 self._target = target
114 self._target = target
184 self._buf = buf
115 self._buf = buf
185
116
186 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
117 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
187 if name != self._target:
118 if name != self._target:
188 return self._opener(name, mode, **kwargs)
119 return self._opener(name, mode, **kwargs)
189 assert not kwargs
120 assert not kwargs
190 return appender(self._opener, name, mode, self._buf)
121 return randomaccessfile.appender(self._opener, name, mode, self._buf)
191
122
192 def __getattr__(self, attr):
123 def __getattr__(self, attr):
193 return getattr(self._opener, attr)
124 return getattr(self._opener, attr)
194
125
195
126
@attr.s
class _changelogrevision:
    """Plain-attribute fallback for a (possibly empty) changelog revision.

    Returned by ``changelogrevision.__new__`` for the null/empty revision;
    mirrors the public attributes of ``changelogrevision``.
    """

    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    manifest = attr.ib()
    user = attr.ib(default=b'')
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    filesadded = attr.ib(default=None)
    filesremoved = attr.ib(default=None)
    p1copies = attr.ib(default=None)
    p2copies = attr.ib(default=None)
    description = attr.ib(default=b'')
    branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
211
142
212
143
class changelogrevision:
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        '_offsets',
        '_text',
        '_sidedata',
        '_cpsd',
        '_changes',
    )

    def __new__(cls, cl, text, sidedata, cpsd):
        if not text:
            return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        nl1 = text.index(b'\n')
        nl2 = text.index(b'\n', nl1 + 1)
        nl3 = text.index(b'\n', nl2 + 1)

        # The list of files may be empty. Which means nl3 is the first of the
        # double newline that precedes the description.
        if text[nl3 + 1 : nl3 + 2] == b'\n':
            doublenl = nl3
        else:
            doublenl = text.index(b'\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text
        self._sidedata = sidedata
        self._cpsd = cpsd
        self._changes = None

        return self

    @property
    def manifest(self):
        # manifest node stored in ascii hex on the first line
        return bin(self._text[0 : self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1 : off[1]])

    @property
    def _rawdate(self):
        # first two space-separated fields of the third line: time and tz
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        return dateextra.split(b' ', 2)[0:2]

    @property
    def _rawextra(self):
        # optional third field of the date line, or None when absent
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        fields = dateextra.split(b' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def changes(self):
        # lazily computed and cached ChangingFiles view
        if self._changes is not None:
            return self._changes
        if self._cpsd:
            changes = metadata.decode_files_sidedata(self._sidedata)
        else:
            changes = metadata.ChangingFiles(
                touched=self.files or (),
                added=self.filesadded or (),
                removed=self.filesremoved or (),
                p1_copies=self.p1copies or {},
                p2_copies=self.p2copies or {},
            )
        self._changes = changes
        return changes

    @property
    def files(self):
        if self._cpsd:
            return sorted(self.changes.touched)
        off = self._offsets
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1 : off[3]].split(b'\n')

    @property
    def filesadded(self):
        if self._cpsd:
            return self.changes.added
        else:
            rawindices = self.extra.get(b'filesadded')
            if rawindices is None:
                return None
            return metadata.decodefileindices(self.files, rawindices)

    @property
    def filesremoved(self):
        if self._cpsd:
            return self.changes.removed
        else:
            rawindices = self.extra.get(b'filesremoved')
            if rawindices is None:
                return None
            return metadata.decodefileindices(self.files, rawindices)

    @property
    def p1copies(self):
        if self._cpsd:
            return self.changes.copied_from_p1
        else:
            rawcopies = self.extra.get(b'p1copies')
            if rawcopies is None:
                return None
            return metadata.decodecopies(self.files, rawcopies)

    @property
    def p2copies(self):
        if self._cpsd:
            return self.changes.copied_from_p2
        else:
            rawcopies = self.extra.get(b'p2copies')
            if rawcopies is None:
                return None
            return metadata.decodecopies(self.files, rawcopies)

    @property
    def description(self):
        # skip the double newline that precedes the free-text comment
        return encoding.tolocal(self._text[self._offsets[3] + 2 :])

    @property
    def branchinfo(self):
        extra = self.extra
        return encoding.tolocal(extra.get(b"branch")), b'close' in extra
387
318
388
319
389 class changelog(revlog.revlog):
320 class changelog(revlog.revlog):
390 def __init__(self, opener, trypending=False, concurrencychecker=None):
321 def __init__(self, opener, trypending=False, concurrencychecker=None):
391 """Load a changelog revlog using an opener.
322 """Load a changelog revlog using an opener.
392
323
393 If ``trypending`` is true, we attempt to load the index from a
324 If ``trypending`` is true, we attempt to load the index from a
394 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
325 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
395 The ``00changelog.i.a`` file contains index (and possibly inline
326 The ``00changelog.i.a`` file contains index (and possibly inline
396 revision) data for a transaction that hasn't been finalized yet.
327 revision) data for a transaction that hasn't been finalized yet.
397 It exists in a separate file to facilitate readers (such as
328 It exists in a separate file to facilitate readers (such as
398 hooks processes) accessing data before a transaction is finalized.
329 hooks processes) accessing data before a transaction is finalized.
399
330
400 ``concurrencychecker`` will be passed to the revlog init function, see
331 ``concurrencychecker`` will be passed to the revlog init function, see
401 the documentation there.
332 the documentation there.
402 """
333 """
403 revlog.revlog.__init__(
334 revlog.revlog.__init__(
404 self,
335 self,
405 opener,
336 opener,
406 target=(revlog_constants.KIND_CHANGELOG, None),
337 target=(revlog_constants.KIND_CHANGELOG, None),
407 radix=b'00changelog',
338 radix=b'00changelog',
408 checkambig=True,
339 checkambig=True,
409 mmaplargeindex=True,
340 mmaplargeindex=True,
410 persistentnodemap=opener.options.get(b'persistent-nodemap', False),
341 persistentnodemap=opener.options.get(b'persistent-nodemap', False),
411 concurrencychecker=concurrencychecker,
342 concurrencychecker=concurrencychecker,
412 trypending=trypending,
343 trypending=trypending,
413 )
344 )
414
345
415 if self._initempty and (self._format_version == revlog.REVLOGV1):
346 if self._initempty and (self._format_version == revlog.REVLOGV1):
416 # changelogs don't benefit from generaldelta.
347 # changelogs don't benefit from generaldelta.
417
348
418 self._format_flags &= ~revlog.FLAG_GENERALDELTA
349 self._format_flags &= ~revlog.FLAG_GENERALDELTA
419 self.delta_config.general_delta = False
350 self.delta_config.general_delta = False
420
351
421 # Delta chains for changelogs tend to be very small because entries
352 # Delta chains for changelogs tend to be very small because entries
422 # tend to be small and don't delta well with each. So disable delta
353 # tend to be small and don't delta well with each. So disable delta
423 # chains.
354 # chains.
424 self._storedeltachains = False
355 self._storedeltachains = False
425
356
426 self._realopener = opener
357 self._realopener = opener
427 self._delayed = False
358 self._delayed = False
428 self._delaybuf = None
359 self._delaybuf = None
429 self._divert = False
360 self._divert = False
430 self._filteredrevs = frozenset()
361 self._filteredrevs = frozenset()
431 self._filteredrevs_hashcache = {}
362 self._filteredrevs_hashcache = {}
432 self._copiesstorage = opener.options.get(b'copies-storage')
363 self._copiesstorage = opener.options.get(b'copies-storage')
433
364
434 @property
365 @property
435 def filteredrevs(self):
366 def filteredrevs(self):
436 return self._filteredrevs
367 return self._filteredrevs
437
368
438 @filteredrevs.setter
369 @filteredrevs.setter
439 def filteredrevs(self, val):
370 def filteredrevs(self, val):
440 # Ensure all updates go through this function
371 # Ensure all updates go through this function
441 assert isinstance(val, frozenset)
372 assert isinstance(val, frozenset)
442 self._filteredrevs = val
373 self._filteredrevs = val
443 self._filteredrevs_hashcache = {}
374 self._filteredrevs_hashcache = {}
444
375
445 def _write_docket(self, tr):
376 def _write_docket(self, tr):
446 if not self._delayed:
377 if not self._delayed:
447 super(changelog, self)._write_docket(tr)
378 super(changelog, self)._write_docket(tr)
448
379
449 def delayupdate(self, tr):
380 def delayupdate(self, tr):
450 """delay visibility of index updates to other readers"""
381 """delay visibility of index updates to other readers"""
451 assert not self._inner.is_open
382 assert not self._inner.is_open
452 if self._docket is None and not self._delayed:
383 if self._docket is None and not self._delayed:
453 if len(self) == 0:
384 if len(self) == 0:
454 self._divert = True
385 self._divert = True
455 if self._realopener.exists(self._indexfile + b'.a'):
386 if self._realopener.exists(self._indexfile + b'.a'):
456 self._realopener.unlink(self._indexfile + b'.a')
387 self._realopener.unlink(self._indexfile + b'.a')
457 self.opener = _divertopener(self._realopener, self._indexfile)
388 self.opener = _divertopener(self._realopener, self._indexfile)
458 else:
389 else:
459 self._delaybuf = []
390 self._delaybuf = []
460 self.opener = _delayopener(
391 self.opener = _delayopener(
461 self._realopener, self._indexfile, self._delaybuf
392 self._realopener, self._indexfile, self._delaybuf
462 )
393 )
463 self._inner.opener = self.opener
394 self._inner.opener = self.opener
464 self._inner._segmentfile.opener = self.opener
395 self._inner._segmentfile.opener = self.opener
465 self._inner._segmentfile_sidedata.opener = self.opener
396 self._inner._segmentfile_sidedata.opener = self.opener
466 self._delayed = True
397 self._delayed = True
467 tr.addpending(b'cl-%i' % id(self), self._writepending)
398 tr.addpending(b'cl-%i' % id(self), self._writepending)
468 tr.addfinalize(b'cl-%i' % id(self), self._finalize)
399 tr.addfinalize(b'cl-%i' % id(self), self._finalize)
469
400
470 def _finalize(self, tr):
401 def _finalize(self, tr):
471 """finalize index updates"""
402 """finalize index updates"""
472 assert not self._inner.is_open
403 assert not self._inner.is_open
473 self._delayed = False
404 self._delayed = False
474 self.opener = self._realopener
405 self.opener = self._realopener
475 self._inner.opener = self.opener
406 self._inner.opener = self.opener
476 self._inner._segmentfile.opener = self.opener
407 self._inner._segmentfile.opener = self.opener
477 self._inner._segmentfile_sidedata.opener = self.opener
408 self._inner._segmentfile_sidedata.opener = self.opener
478 # move redirected index data back into place
409 # move redirected index data back into place
479 if self._docket is not None:
410 if self._docket is not None:
480 self._write_docket(tr)
411 self._write_docket(tr)
481 elif self._divert:
412 elif self._divert:
482 assert not self._delaybuf
413 assert not self._delaybuf
483 tmpname = self._indexfile + b".a"
414 tmpname = self._indexfile + b".a"
484 nfile = self.opener.open(tmpname)
415 nfile = self.opener.open(tmpname)
485 nfile.close()
416 nfile.close()
486 self.opener.rename(tmpname, self._indexfile, checkambig=True)
417 self.opener.rename(tmpname, self._indexfile, checkambig=True)
487 elif self._delaybuf:
418 elif self._delaybuf:
488 fp = self.opener(self._indexfile, b'a', checkambig=True)
419 fp = self.opener(self._indexfile, b'a', checkambig=True)
489 fp.write(b"".join(self._delaybuf))
420 fp.write(b"".join(self._delaybuf))
490 fp.close()
421 fp.close()
491 self._delaybuf = None
422 self._delaybuf = None
492 self._divert = False
423 self._divert = False
493 # split when we're done
424 # split when we're done
494 self._enforceinlinesize(tr, side_write=False)
425 self._enforceinlinesize(tr, side_write=False)
495
426
496 def _writepending(self, tr):
427 def _writepending(self, tr):
497 """create a file containing the unfinalized state for
428 """create a file containing the unfinalized state for
498 pretxnchangegroup"""
429 pretxnchangegroup"""
499 assert not self._inner.is_open
430 assert not self._inner.is_open
500 if self._docket:
431 if self._docket:
501 return self._docket.write(tr, pending=True)
432 return self._docket.write(tr, pending=True)
502 if self._delaybuf:
433 if self._delaybuf:
503 # make a temporary copy of the index
434 # make a temporary copy of the index
504 fp1 = self._realopener(self._indexfile)
435 fp1 = self._realopener(self._indexfile)
505 pendingfilename = self._indexfile + b".a"
436 pendingfilename = self._indexfile + b".a"
506 # register as a temp file to ensure cleanup on failure
437 # register as a temp file to ensure cleanup on failure
507 tr.registertmp(pendingfilename)
438 tr.registertmp(pendingfilename)
508 # write existing data
439 # write existing data
509 fp2 = self._realopener(pendingfilename, b"w")
440 fp2 = self._realopener(pendingfilename, b"w")
510 fp2.write(fp1.read())
441 fp2.write(fp1.read())
511 # add pending data
442 # add pending data
512 fp2.write(b"".join(self._delaybuf))
443 fp2.write(b"".join(self._delaybuf))
513 fp2.close()
444 fp2.close()
514 # switch modes so finalize can simply rename
445 # switch modes so finalize can simply rename
515 self._delaybuf = None
446 self._delaybuf = None
516 self._divert = True
447 self._divert = True
517 self.opener = _divertopener(self._realopener, self._indexfile)
448 self.opener = _divertopener(self._realopener, self._indexfile)
518 self._inner.opener = self.opener
449 self._inner.opener = self.opener
519 self._inner._segmentfile.opener = self.opener
450 self._inner._segmentfile.opener = self.opener
520 self._inner._segmentfile_sidedata.opener = self.opener
451 self._inner._segmentfile_sidedata.opener = self.opener
521
452
522 if self._divert:
453 if self._divert:
523 return True
454 return True
524
455
525 return False
456 return False
526
457
527 def _enforceinlinesize(self, tr, side_write=True):
458 def _enforceinlinesize(self, tr, side_write=True):
528 if not self._delayed:
459 if not self._delayed:
529 revlog.revlog._enforceinlinesize(self, tr, side_write=side_write)
460 revlog.revlog._enforceinlinesize(self, tr, side_write=side_write)
530
461
531 def read(self, nodeorrev):
462 def read(self, nodeorrev):
532 """Obtain data from a parsed changelog revision.
463 """Obtain data from a parsed changelog revision.
533
464
534 Returns a 6-tuple of:
465 Returns a 6-tuple of:
535
466
536 - manifest node in binary
467 - manifest node in binary
537 - author/user as a localstr
468 - author/user as a localstr
538 - date as a 2-tuple of (time, timezone)
469 - date as a 2-tuple of (time, timezone)
539 - list of files
470 - list of files
540 - commit message as a localstr
471 - commit message as a localstr
541 - dict of extra metadata
472 - dict of extra metadata
542
473
543 Unless you need to access all fields, consider calling
474 Unless you need to access all fields, consider calling
544 ``changelogrevision`` instead, as it is faster for partial object
475 ``changelogrevision`` instead, as it is faster for partial object
545 access.
476 access.
546 """
477 """
547 d = self._revisiondata(nodeorrev)
478 d = self._revisiondata(nodeorrev)
548 sidedata = self.sidedata(nodeorrev)
479 sidedata = self.sidedata(nodeorrev)
549 copy_sd = self._copiesstorage == b'changeset-sidedata'
480 copy_sd = self._copiesstorage == b'changeset-sidedata'
550 c = changelogrevision(self, d, sidedata, copy_sd)
481 c = changelogrevision(self, d, sidedata, copy_sd)
551 return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
482 return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
552
483
553 def changelogrevision(self, nodeorrev):
484 def changelogrevision(self, nodeorrev):
554 """Obtain a ``changelogrevision`` for a node or revision."""
485 """Obtain a ``changelogrevision`` for a node or revision."""
555 text = self._revisiondata(nodeorrev)
486 text = self._revisiondata(nodeorrev)
556 sidedata = self.sidedata(nodeorrev)
487 sidedata = self.sidedata(nodeorrev)
557 return changelogrevision(
488 return changelogrevision(
558 self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
489 self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
559 )
490 )
560
491
561 def readfiles(self, nodeorrev):
492 def readfiles(self, nodeorrev):
562 """
493 """
563 short version of read that only returns the files modified by the cset
494 short version of read that only returns the files modified by the cset
564 """
495 """
565 text = self.revision(nodeorrev)
496 text = self.revision(nodeorrev)
566 if not text:
497 if not text:
567 return []
498 return []
568 last = text.index(b"\n\n")
499 last = text.index(b"\n\n")
569 l = text[:last].split(b'\n')
500 l = text[:last].split(b'\n')
570 return l[3:]
501 return l[3:]
571
502
572 def add(
503 def add(
573 self,
504 self,
574 manifest,
505 manifest,
575 files,
506 files,
576 desc,
507 desc,
577 transaction,
508 transaction,
578 p1,
509 p1,
579 p2,
510 p2,
580 user,
511 user,
581 date=None,
512 date=None,
582 extra=None,
513 extra=None,
583 ):
514 ):
584 # Convert to UTF-8 encoded bytestrings as the very first
515 # Convert to UTF-8 encoded bytestrings as the very first
585 # thing: calling any method on a localstr object will turn it
516 # thing: calling any method on a localstr object will turn it
586 # into a str object and the cached UTF-8 string is thus lost.
517 # into a str object and the cached UTF-8 string is thus lost.
587 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
518 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
588
519
589 user = user.strip()
520 user = user.strip()
590 # An empty username or a username with a "\n" will make the
521 # An empty username or a username with a "\n" will make the
591 # revision text contain two "\n\n" sequences -> corrupt
522 # revision text contain two "\n\n" sequences -> corrupt
592 # repository since read cannot unpack the revision.
523 # repository since read cannot unpack the revision.
593 if not user:
524 if not user:
594 raise error.StorageError(_(b"empty username"))
525 raise error.StorageError(_(b"empty username"))
595 if b"\n" in user:
526 if b"\n" in user:
596 raise error.StorageError(
527 raise error.StorageError(
597 _(b"username %r contains a newline") % pycompat.bytestr(user)
528 _(b"username %r contains a newline") % pycompat.bytestr(user)
598 )
529 )
599
530
600 desc = stripdesc(desc)
531 desc = stripdesc(desc)
601
532
602 if date:
533 if date:
603 parseddate = b"%d %d" % dateutil.parsedate(date)
534 parseddate = b"%d %d" % dateutil.parsedate(date)
604 else:
535 else:
605 parseddate = b"%d %d" % dateutil.makedate()
536 parseddate = b"%d %d" % dateutil.makedate()
606 if extra:
537 if extra:
607 branch = extra.get(b"branch")
538 branch = extra.get(b"branch")
608 if branch in (b"default", b""):
539 if branch in (b"default", b""):
609 del extra[b"branch"]
540 del extra[b"branch"]
610 elif branch in (b".", b"null", b"tip"):
541 elif branch in (b".", b"null", b"tip"):
611 raise error.StorageError(
542 raise error.StorageError(
612 _(b'the name \'%s\' is reserved') % branch
543 _(b'the name \'%s\' is reserved') % branch
613 )
544 )
614 sortedfiles = sorted(files.touched)
545 sortedfiles = sorted(files.touched)
615 flags = 0
546 flags = 0
616 sidedata = None
547 sidedata = None
617 if self._copiesstorage == b'changeset-sidedata':
548 if self._copiesstorage == b'changeset-sidedata':
618 if files.has_copies_info:
549 if files.has_copies_info:
619 flags |= flagutil.REVIDX_HASCOPIESINFO
550 flags |= flagutil.REVIDX_HASCOPIESINFO
620 sidedata = metadata.encode_files_sidedata(files)
551 sidedata = metadata.encode_files_sidedata(files)
621
552
622 if extra:
553 if extra:
623 extra = encodeextra(extra)
554 extra = encodeextra(extra)
624 parseddate = b"%s %s" % (parseddate, extra)
555 parseddate = b"%s %s" % (parseddate, extra)
625 l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
556 l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
626 text = b"\n".join(l)
557 text = b"\n".join(l)
627 rev = self.addrevision(
558 rev = self.addrevision(
628 text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
559 text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
629 )
560 )
630 return self.node(rev)
561 return self.node(rev)
631
562
632 def branchinfo(self, rev):
563 def branchinfo(self, rev):
633 """return the branch name and open/close state of a revision
564 """return the branch name and open/close state of a revision
634
565
635 This function exists because creating a changectx object
566 This function exists because creating a changectx object
636 just to access this is costly."""
567 just to access this is costly."""
637 return self.changelogrevision(rev).branchinfo
568 return self.changelogrevision(rev).branchinfo
638
569
639 def _nodeduplicatecallback(self, transaction, rev):
570 def _nodeduplicatecallback(self, transaction, rev):
640 # keep track of revisions that got "re-added", eg: unbunde of know rev.
571 # keep track of revisions that got "re-added", eg: unbunde of know rev.
641 #
572 #
642 # We track them in a list to preserve their order from the source bundle
573 # We track them in a list to preserve their order from the source bundle
643 duplicates = transaction.changes.setdefault(b'revduplicates', [])
574 duplicates = transaction.changes.setdefault(b'revduplicates', [])
644 duplicates.append(rev)
575 duplicates.append(rev)
@@ -1,164 +1,234 b''
1 # Copyright Mercurial Contributors
1 # Copyright Mercurial Contributors
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 import contextlib
6 import contextlib
7
7
8 from ..i18n import _
8 from ..i18n import _
9 from .. import (
9 from .. import (
10 error,
10 error,
11 util,
11 util,
12 )
12 )
13
13
14
14
15 _MAX_CACHED_CHUNK_SIZE = 1048576 # 1 MiB
15 _MAX_CACHED_CHUNK_SIZE = 1048576 # 1 MiB
16
16
17 PARTIAL_READ_MSG = _(
17 PARTIAL_READ_MSG = _(
18 b'partial read of revlog %s; expected %d bytes from offset %d, got %d'
18 b'partial read of revlog %s; expected %d bytes from offset %d, got %d'
19 )
19 )
20
20
21
21
22 def _is_power_of_two(n):
22 def _is_power_of_two(n):
23 return (n & (n - 1) == 0) and n != 0
23 return (n & (n - 1) == 0) and n != 0
24
24
25
25
26 class appender:
27 """the changelog index must be updated last on disk, so we use this class
28 to delay writes to it"""
29
30 def __init__(self, vfs, name, mode, buf):
31 self.data = buf
32 fp = vfs(name, mode)
33 self.fp = fp
34 self.offset = fp.tell()
35 self.size = vfs.fstat(fp).st_size
36 self._end = self.size
37
38 def end(self):
39 return self._end
40
41 def tell(self):
42 return self.offset
43
44 def flush(self):
45 pass
46
47 @property
48 def closed(self):
49 return self.fp.closed
50
51 def close(self):
52 self.fp.close()
53
54 def seek(self, offset, whence=0):
55 '''virtual file offset spans real file and data'''
56 if whence == 0:
57 self.offset = offset
58 elif whence == 1:
59 self.offset += offset
60 elif whence == 2:
61 self.offset = self.end() + offset
62 if self.offset < self.size:
63 self.fp.seek(self.offset)
64
65 def read(self, count=-1):
66 '''only trick here is reads that span real file and data'''
67 ret = b""
68 if self.offset < self.size:
69 s = self.fp.read(count)
70 ret = s
71 self.offset += len(s)
72 if count > 0:
73 count -= len(s)
74 if count != 0:
75 doff = self.offset - self.size
76 self.data.insert(0, b"".join(self.data))
77 del self.data[1:]
78 s = self.data[0][doff : doff + count]
79 self.offset += len(s)
80 ret += s
81 return ret
82
83 def write(self, s):
84 self.data.append(bytes(s))
85 self.offset += len(s)
86 self._end += len(s)
87
88 def __enter__(self):
89 self.fp.__enter__()
90 return self
91
92 def __exit__(self, *args):
93 return self.fp.__exit__(*args)
94
95
26 class randomaccessfile:
96 class randomaccessfile:
27 """Accessing arbitrary chuncks of data within a file, with some caching"""
97 """Accessing arbitrary chuncks of data within a file, with some caching"""
28
98
29 def __init__(
99 def __init__(
30 self,
100 self,
31 opener,
101 opener,
32 filename,
102 filename,
33 default_cached_chunk_size,
103 default_cached_chunk_size,
34 initial_cache=None,
104 initial_cache=None,
35 ):
105 ):
36 # Required by bitwise manipulation below
106 # Required by bitwise manipulation below
37 assert _is_power_of_two(default_cached_chunk_size)
107 assert _is_power_of_two(default_cached_chunk_size)
38
108
39 self.opener = opener
109 self.opener = opener
40 self.filename = filename
110 self.filename = filename
41 self.default_cached_chunk_size = default_cached_chunk_size
111 self.default_cached_chunk_size = default_cached_chunk_size
42 self.writing_handle = None # This is set from revlog.py
112 self.writing_handle = None # This is set from revlog.py
43 self.reading_handle = None
113 self.reading_handle = None
44 self._cached_chunk = b''
114 self._cached_chunk = b''
45 self._cached_chunk_position = 0 # Offset from the start of the file
115 self._cached_chunk_position = 0 # Offset from the start of the file
46 if initial_cache:
116 if initial_cache:
47 self._cached_chunk_position, self._cached_chunk = initial_cache
117 self._cached_chunk_position, self._cached_chunk = initial_cache
48
118
49 def clear_cache(self):
119 def clear_cache(self):
50 self._cached_chunk = b''
120 self._cached_chunk = b''
51 self._cached_chunk_position = 0
121 self._cached_chunk_position = 0
52
122
53 @property
123 @property
54 def is_open(self):
124 def is_open(self):
55 """True if any file handle is being held
125 """True if any file handle is being held
56
126
57 Used for assert and debug in the python code"""
127 Used for assert and debug in the python code"""
58 return (
128 return (
59 self.reading_handle is not None or self.writing_handle is not None
129 self.reading_handle is not None or self.writing_handle is not None
60 )
130 )
61
131
62 def _open(self, mode=b'r'):
132 def _open(self, mode=b'r'):
63 """Return a file object"""
133 """Return a file object"""
64 return self.opener(self.filename, mode=mode)
134 return self.opener(self.filename, mode=mode)
65
135
66 @contextlib.contextmanager
136 @contextlib.contextmanager
67 def _read_handle(self):
137 def _read_handle(self):
68 """File object suitable for reading data"""
138 """File object suitable for reading data"""
69 # Use a file handle being actively used for writes, if available.
139 # Use a file handle being actively used for writes, if available.
70 # There is some danger to doing this because reads will seek the
140 # There is some danger to doing this because reads will seek the
71 # file. However, revlog._writeentry performs a SEEK_END before all
141 # file. However, revlog._writeentry performs a SEEK_END before all
72 # writes, so we should be safe.
142 # writes, so we should be safe.
73 if self.writing_handle:
143 if self.writing_handle:
74 yield self.writing_handle
144 yield self.writing_handle
75
145
76 elif self.reading_handle:
146 elif self.reading_handle:
77 yield self.reading_handle
147 yield self.reading_handle
78
148
79 # Otherwise open a new file handle.
149 # Otherwise open a new file handle.
80 else:
150 else:
81 with self._open() as fp:
151 with self._open() as fp:
82 yield fp
152 yield fp
83
153
84 @contextlib.contextmanager
154 @contextlib.contextmanager
85 def reading(self):
155 def reading(self):
86 """Context manager that keeps the file open for reading"""
156 """Context manager that keeps the file open for reading"""
87 if (
157 if (
88 self.reading_handle is None
158 self.reading_handle is None
89 and self.writing_handle is None
159 and self.writing_handle is None
90 and self.filename is not None
160 and self.filename is not None
91 ):
161 ):
92 with self._open() as fp:
162 with self._open() as fp:
93 self.reading_handle = fp
163 self.reading_handle = fp
94 try:
164 try:
95 yield
165 yield
96 finally:
166 finally:
97 self.reading_handle = None
167 self.reading_handle = None
98 else:
168 else:
99 yield
169 yield
100
170
101 def read_chunk(self, offset, length):
171 def read_chunk(self, offset, length):
102 """Read a chunk of bytes from the file.
172 """Read a chunk of bytes from the file.
103
173
104 Accepts an absolute offset, length to read, and an optional existing
174 Accepts an absolute offset, length to read, and an optional existing
105 file handle to read from.
175 file handle to read from.
106
176
107 If an existing file handle is passed, it will be seeked and the
177 If an existing file handle is passed, it will be seeked and the
108 original seek position will NOT be restored.
178 original seek position will NOT be restored.
109
179
110 Returns a str or buffer of raw byte data.
180 Returns a str or buffer of raw byte data.
111
181
112 Raises if the requested number of bytes could not be read.
182 Raises if the requested number of bytes could not be read.
113 """
183 """
114 end = offset + length
184 end = offset + length
115 cache_start = self._cached_chunk_position
185 cache_start = self._cached_chunk_position
116 cache_end = cache_start + len(self._cached_chunk)
186 cache_end = cache_start + len(self._cached_chunk)
117 # Is the requested chunk within the cache?
187 # Is the requested chunk within the cache?
118 if cache_start <= offset and end <= cache_end:
188 if cache_start <= offset and end <= cache_end:
119 if cache_start == offset and end == cache_end:
189 if cache_start == offset and end == cache_end:
120 return self._cached_chunk # avoid a copy
190 return self._cached_chunk # avoid a copy
121 relative_start = offset - cache_start
191 relative_start = offset - cache_start
122 return util.buffer(self._cached_chunk, relative_start, length)
192 return util.buffer(self._cached_chunk, relative_start, length)
123
193
124 return self._read_and_update_cache(offset, length)
194 return self._read_and_update_cache(offset, length)
125
195
126 def _read_and_update_cache(self, offset, length):
196 def _read_and_update_cache(self, offset, length):
127 # Cache data both forward and backward around the requested
197 # Cache data both forward and backward around the requested
128 # data, in a fixed size window. This helps speed up operations
198 # data, in a fixed size window. This helps speed up operations
129 # involving reading the revlog backwards.
199 # involving reading the revlog backwards.
130 real_offset = offset & ~(self.default_cached_chunk_size - 1)
200 real_offset = offset & ~(self.default_cached_chunk_size - 1)
131 real_length = (
201 real_length = (
132 (offset + length + self.default_cached_chunk_size)
202 (offset + length + self.default_cached_chunk_size)
133 & ~(self.default_cached_chunk_size - 1)
203 & ~(self.default_cached_chunk_size - 1)
134 ) - real_offset
204 ) - real_offset
135 with self._read_handle() as file_obj:
205 with self._read_handle() as file_obj:
136 file_obj.seek(real_offset)
206 file_obj.seek(real_offset)
137 data = file_obj.read(real_length)
207 data = file_obj.read(real_length)
138
208
139 self._add_cached_chunk(real_offset, data)
209 self._add_cached_chunk(real_offset, data)
140
210
141 relative_offset = offset - real_offset
211 relative_offset = offset - real_offset
142 got = len(data) - relative_offset
212 got = len(data) - relative_offset
143 if got < length:
213 if got < length:
144 message = PARTIAL_READ_MSG % (self.filename, length, offset, got)
214 message = PARTIAL_READ_MSG % (self.filename, length, offset, got)
145 raise error.RevlogError(message)
215 raise error.RevlogError(message)
146
216
147 if offset != real_offset or real_length != length:
217 if offset != real_offset or real_length != length:
148 return util.buffer(data, relative_offset, length)
218 return util.buffer(data, relative_offset, length)
149 return data
219 return data
150
220
151 def _add_cached_chunk(self, offset, data):
221 def _add_cached_chunk(self, offset, data):
152 """Add to or replace the cached data chunk.
222 """Add to or replace the cached data chunk.
153
223
154 Accepts an absolute offset and the data that is at that location.
224 Accepts an absolute offset and the data that is at that location.
155 """
225 """
156 if (
226 if (
157 self._cached_chunk_position + len(self._cached_chunk) == offset
227 self._cached_chunk_position + len(self._cached_chunk) == offset
158 and len(self._cached_chunk) + len(data) < _MAX_CACHED_CHUNK_SIZE
228 and len(self._cached_chunk) + len(data) < _MAX_CACHED_CHUNK_SIZE
159 ):
229 ):
160 # add to existing cache
230 # add to existing cache
161 self._cached_chunk += data
231 self._cached_chunk += data
162 else:
232 else:
163 self._cached_chunk = data
233 self._cached_chunk = data
164 self._cached_chunk_position = offset
234 self._cached_chunk_position = offset
General Comments 0
You need to be logged in to leave comments. Login now