##// END OF EJS Templates
branchmap: make "closed" a set from beginning instead of converting from list...
Martin von Zweigbergk -
r44086:5cdc3c12 default
parent child Browse files
Show More
@@ -1,756 +1,756
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11
11
12 from .node import (
12 from .node import (
13 bin,
13 bin,
14 hex,
14 hex,
15 nullid,
15 nullid,
16 nullrev,
16 nullrev,
17 )
17 )
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 pycompat,
21 pycompat,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 )
24 )
25 from .utils import (
25 from .utils import (
26 repoviewutil,
26 repoviewutil,
27 stringutil,
27 stringutil,
28 )
28 )
29
29
30 if not globals():
30 if not globals():
31 from typing import (
31 from typing import (
32 Any,
32 Any,
33 Callable,
33 Callable,
34 Dict,
34 Dict,
35 Iterable,
35 Iterable,
36 List,
36 List,
37 Optional,
37 Optional,
38 Set,
38 Set,
39 Tuple,
39 Tuple,
40 Union,
40 Union,
41 )
41 )
42
42
43 assert any(
43 assert any(
44 (Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union,)
44 (Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union,)
45 )
45 )
46
46
47 subsettable = repoviewutil.subsettable
47 subsettable = repoviewutil.subsettable
48
48
49 calcsize = struct.calcsize
49 calcsize = struct.calcsize
50 pack_into = struct.pack_into
50 pack_into = struct.pack_into
51 unpack_from = struct.unpack_from
51 unpack_from = struct.unpack_from
52
52
53
53
54 class BranchMapCache(object):
54 class BranchMapCache(object):
55 """mapping of filtered views of repo with their branchcache"""
55 """mapping of filtered views of repo with their branchcache"""
56
56
57 def __init__(self):
57 def __init__(self):
58 self._per_filter = {}
58 self._per_filter = {}
59
59
60 def __getitem__(self, repo):
60 def __getitem__(self, repo):
61 self.updatecache(repo)
61 self.updatecache(repo)
62 return self._per_filter[repo.filtername]
62 return self._per_filter[repo.filtername]
63
63
64 def updatecache(self, repo):
64 def updatecache(self, repo):
65 """Update the cache for the given filtered view on a repository"""
65 """Update the cache for the given filtered view on a repository"""
66 # This can trigger updates for the caches for subsets of the filtered
66 # This can trigger updates for the caches for subsets of the filtered
67 # view, e.g. when there is no cache for this filtered view or the cache
67 # view, e.g. when there is no cache for this filtered view or the cache
68 # is stale.
68 # is stale.
69
69
70 cl = repo.changelog
70 cl = repo.changelog
71 filtername = repo.filtername
71 filtername = repo.filtername
72 bcache = self._per_filter.get(filtername)
72 bcache = self._per_filter.get(filtername)
73 if bcache is None or not bcache.validfor(repo):
73 if bcache is None or not bcache.validfor(repo):
74 # cache object missing or cache object stale? Read from disk
74 # cache object missing or cache object stale? Read from disk
75 bcache = branchcache.fromfile(repo)
75 bcache = branchcache.fromfile(repo)
76
76
77 revs = []
77 revs = []
78 if bcache is None:
78 if bcache is None:
79 # no (fresh) cache available anymore, perhaps we can re-use
79 # no (fresh) cache available anymore, perhaps we can re-use
80 # the cache for a subset, then extend that to add info on missing
80 # the cache for a subset, then extend that to add info on missing
81 # revisions.
81 # revisions.
82 subsetname = subsettable.get(filtername)
82 subsetname = subsettable.get(filtername)
83 if subsetname is not None:
83 if subsetname is not None:
84 subset = repo.filtered(subsetname)
84 subset = repo.filtered(subsetname)
85 bcache = self[subset].copy()
85 bcache = self[subset].copy()
86 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
86 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
87 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
87 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
88 else:
88 else:
89 # nothing to fall back on, start empty.
89 # nothing to fall back on, start empty.
90 bcache = branchcache()
90 bcache = branchcache()
91
91
92 revs.extend(cl.revs(start=bcache.tiprev + 1))
92 revs.extend(cl.revs(start=bcache.tiprev + 1))
93 if revs:
93 if revs:
94 bcache.update(repo, revs)
94 bcache.update(repo, revs)
95
95
96 assert bcache.validfor(repo), filtername
96 assert bcache.validfor(repo), filtername
97 self._per_filter[repo.filtername] = bcache
97 self._per_filter[repo.filtername] = bcache
98
98
99 def replace(self, repo, remotebranchmap):
99 def replace(self, repo, remotebranchmap):
100 """Replace the branchmap cache for a repo with a branch mapping.
100 """Replace the branchmap cache for a repo with a branch mapping.
101
101
102 This is likely only called during clone with a branch map from a
102 This is likely only called during clone with a branch map from a
103 remote.
103 remote.
104
104
105 """
105 """
106 cl = repo.changelog
106 cl = repo.changelog
107 clrev = cl.rev
107 clrev = cl.rev
108 clbranchinfo = cl.branchinfo
108 clbranchinfo = cl.branchinfo
109 rbheads = []
109 rbheads = []
110 closed = []
110 closed = set()
111 for bheads in pycompat.itervalues(remotebranchmap):
111 for bheads in pycompat.itervalues(remotebranchmap):
112 rbheads += bheads
112 rbheads += bheads
113 for h in bheads:
113 for h in bheads:
114 r = clrev(h)
114 r = clrev(h)
115 b, c = clbranchinfo(r)
115 b, c = clbranchinfo(r)
116 if c:
116 if c:
117 closed.append(h)
117 closed.add(h)
118
118
119 if rbheads:
119 if rbheads:
120 rtiprev = max((int(clrev(node)) for node in rbheads))
120 rtiprev = max((int(clrev(node)) for node in rbheads))
121 cache = branchcache(
121 cache = branchcache(
122 remotebranchmap,
122 remotebranchmap,
123 repo[rtiprev].node(),
123 repo[rtiprev].node(),
124 rtiprev,
124 rtiprev,
125 closednodes=set(closed),
125 closednodes=closed,
126 )
126 )
127
127
128 # Try to stick it as low as possible
128 # Try to stick it as low as possible
129 # filter above served are unlikely to be fetch from a clone
129 # filter above served are unlikely to be fetch from a clone
130 for candidate in (b'base', b'immutable', b'served'):
130 for candidate in (b'base', b'immutable', b'served'):
131 rview = repo.filtered(candidate)
131 rview = repo.filtered(candidate)
132 if cache.validfor(rview):
132 if cache.validfor(rview):
133 self._per_filter[candidate] = cache
133 self._per_filter[candidate] = cache
134 cache.write(rview)
134 cache.write(rview)
135 return
135 return
136
136
137 def clear(self):
137 def clear(self):
138 self._per_filter.clear()
138 self._per_filter.clear()
139
139
140
140
141 def _unknownnode(node):
141 def _unknownnode(node):
142 """ raises ValueError when branchcache found a node which does not exists
142 """ raises ValueError when branchcache found a node which does not exists
143 """
143 """
144 raise ValueError('node %s does not exist' % pycompat.sysstr(hex(node)))
144 raise ValueError('node %s does not exist' % pycompat.sysstr(hex(node)))
145
145
146
146
147 def _branchcachedesc(repo):
147 def _branchcachedesc(repo):
148 if repo.filtername is not None:
148 if repo.filtername is not None:
149 return b'branch cache (%s)' % repo.filtername
149 return b'branch cache (%s)' % repo.filtername
150 else:
150 else:
151 return b'branch cache'
151 return b'branch cache'
152
152
153
153
154 class branchcache(object):
154 class branchcache(object):
155 """A dict like object that hold branches heads cache.
155 """A dict like object that hold branches heads cache.
156
156
157 This cache is used to avoid costly computations to determine all the
157 This cache is used to avoid costly computations to determine all the
158 branch heads of a repo.
158 branch heads of a repo.
159
159
160 The cache is serialized on disk in the following format:
160 The cache is serialized on disk in the following format:
161
161
162 <tip hex node> <tip rev number> [optional filtered repo hex hash]
162 <tip hex node> <tip rev number> [optional filtered repo hex hash]
163 <branch head hex node> <open/closed state> <branch name>
163 <branch head hex node> <open/closed state> <branch name>
164 <branch head hex node> <open/closed state> <branch name>
164 <branch head hex node> <open/closed state> <branch name>
165 ...
165 ...
166
166
167 The first line is used to check if the cache is still valid. If the
167 The first line is used to check if the cache is still valid. If the
168 branch cache is for a filtered repo view, an optional third hash is
168 branch cache is for a filtered repo view, an optional third hash is
169 included that hashes the hashes of all filtered revisions.
169 included that hashes the hashes of all filtered revisions.
170
170
171 The open/closed state is represented by a single letter 'o' or 'c'.
171 The open/closed state is represented by a single letter 'o' or 'c'.
172 This field can be used to avoid changelog reads when determining if a
172 This field can be used to avoid changelog reads when determining if a
173 branch head closes a branch or not.
173 branch head closes a branch or not.
174 """
174 """
175
175
176 def __init__(
176 def __init__(
177 self,
177 self,
178 entries=(),
178 entries=(),
179 tipnode=nullid,
179 tipnode=nullid,
180 tiprev=nullrev,
180 tiprev=nullrev,
181 filteredhash=None,
181 filteredhash=None,
182 closednodes=None,
182 closednodes=None,
183 hasnode=None,
183 hasnode=None,
184 ):
184 ):
185 # type: (Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes, int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
185 # type: (Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes, int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
186 """ hasnode is a function which can be used to verify whether changelog
186 """ hasnode is a function which can be used to verify whether changelog
187 has a given node or not. If it's not provided, we assume that every node
187 has a given node or not. If it's not provided, we assume that every node
188 we have exists in changelog """
188 we have exists in changelog """
189 self.tipnode = tipnode
189 self.tipnode = tipnode
190 self.tiprev = tiprev
190 self.tiprev = tiprev
191 self.filteredhash = filteredhash
191 self.filteredhash = filteredhash
192 # closednodes is a set of nodes that close their branch. If the branch
192 # closednodes is a set of nodes that close their branch. If the branch
193 # cache has been updated, it may contain nodes that are no longer
193 # cache has been updated, it may contain nodes that are no longer
194 # heads.
194 # heads.
195 if closednodes is None:
195 if closednodes is None:
196 self._closednodes = set()
196 self._closednodes = set()
197 else:
197 else:
198 self._closednodes = closednodes
198 self._closednodes = closednodes
199 self._entries = dict(entries)
199 self._entries = dict(entries)
200 # whether closed nodes are verified or not
200 # whether closed nodes are verified or not
201 self._closedverified = False
201 self._closedverified = False
202 # branches for which nodes are verified
202 # branches for which nodes are verified
203 self._verifiedbranches = set()
203 self._verifiedbranches = set()
204 self._hasnode = hasnode
204 self._hasnode = hasnode
205 if self._hasnode is None:
205 if self._hasnode is None:
206 self._hasnode = lambda x: True
206 self._hasnode = lambda x: True
207
207
208 def _verifyclosed(self):
208 def _verifyclosed(self):
209 """ verify the closed nodes we have """
209 """ verify the closed nodes we have """
210 if self._closedverified:
210 if self._closedverified:
211 return
211 return
212 for node in self._closednodes:
212 for node in self._closednodes:
213 if not self._hasnode(node):
213 if not self._hasnode(node):
214 _unknownnode(node)
214 _unknownnode(node)
215
215
216 self._closedverified = True
216 self._closedverified = True
217
217
218 def _verifybranch(self, branch):
218 def _verifybranch(self, branch):
219 """ verify head nodes for the given branch. """
219 """ verify head nodes for the given branch. """
220 if branch not in self._entries or branch in self._verifiedbranches:
220 if branch not in self._entries or branch in self._verifiedbranches:
221 return
221 return
222 for n in self._entries[branch]:
222 for n in self._entries[branch]:
223 if not self._hasnode(n):
223 if not self._hasnode(n):
224 _unknownnode(n)
224 _unknownnode(n)
225
225
226 self._verifiedbranches.add(branch)
226 self._verifiedbranches.add(branch)
227
227
228 def _verifyall(self):
228 def _verifyall(self):
229 """ verifies nodes of all the branches """
229 """ verifies nodes of all the branches """
230 needverification = set(self._entries.keys()) - self._verifiedbranches
230 needverification = set(self._entries.keys()) - self._verifiedbranches
231 for b in needverification:
231 for b in needverification:
232 self._verifybranch(b)
232 self._verifybranch(b)
233
233
234 def __iter__(self):
234 def __iter__(self):
235 return iter(self._entries)
235 return iter(self._entries)
236
236
237 def __setitem__(self, key, value):
237 def __setitem__(self, key, value):
238 self._entries[key] = value
238 self._entries[key] = value
239
239
240 def __getitem__(self, key):
240 def __getitem__(self, key):
241 self._verifybranch(key)
241 self._verifybranch(key)
242 return self._entries[key]
242 return self._entries[key]
243
243
244 def __contains__(self, key):
244 def __contains__(self, key):
245 self._verifybranch(key)
245 self._verifybranch(key)
246 return key in self._entries
246 return key in self._entries
247
247
248 def iteritems(self):
248 def iteritems(self):
249 for k, v in pycompat.iteritems(self._entries):
249 for k, v in pycompat.iteritems(self._entries):
250 self._verifybranch(k)
250 self._verifybranch(k)
251 yield k, v
251 yield k, v
252
252
253 items = iteritems
253 items = iteritems
254
254
255 def hasbranch(self, label):
255 def hasbranch(self, label):
256 """ checks whether a branch of this name exists or not """
256 """ checks whether a branch of this name exists or not """
257 self._verifybranch(label)
257 self._verifybranch(label)
258 return label in self._entries
258 return label in self._entries
259
259
260 @classmethod
260 @classmethod
261 def fromfile(cls, repo):
261 def fromfile(cls, repo):
262 f = None
262 f = None
263 try:
263 try:
264 f = repo.cachevfs(cls._filename(repo))
264 f = repo.cachevfs(cls._filename(repo))
265 lineiter = iter(f)
265 lineiter = iter(f)
266 cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
266 cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
267 last, lrev = cachekey[:2]
267 last, lrev = cachekey[:2]
268 last, lrev = bin(last), int(lrev)
268 last, lrev = bin(last), int(lrev)
269 filteredhash = None
269 filteredhash = None
270 hasnode = repo.changelog.hasnode
270 hasnode = repo.changelog.hasnode
271 if len(cachekey) > 2:
271 if len(cachekey) > 2:
272 filteredhash = bin(cachekey[2])
272 filteredhash = bin(cachekey[2])
273 bcache = cls(
273 bcache = cls(
274 tipnode=last,
274 tipnode=last,
275 tiprev=lrev,
275 tiprev=lrev,
276 filteredhash=filteredhash,
276 filteredhash=filteredhash,
277 hasnode=hasnode,
277 hasnode=hasnode,
278 )
278 )
279 if not bcache.validfor(repo):
279 if not bcache.validfor(repo):
280 # invalidate the cache
280 # invalidate the cache
281 raise ValueError('tip differs')
281 raise ValueError('tip differs')
282 bcache.load(repo, lineiter)
282 bcache.load(repo, lineiter)
283 except (IOError, OSError):
283 except (IOError, OSError):
284 return None
284 return None
285
285
286 except Exception as inst:
286 except Exception as inst:
287 if repo.ui.debugflag:
287 if repo.ui.debugflag:
288 msg = b'invalid %s: %s\n'
288 msg = b'invalid %s: %s\n'
289 repo.ui.debug(
289 repo.ui.debug(
290 msg
290 msg
291 % (
291 % (
292 _branchcachedesc(repo),
292 _branchcachedesc(repo),
293 pycompat.bytestr(
293 pycompat.bytestr(
294 inst # pytype: disable=wrong-arg-types
294 inst # pytype: disable=wrong-arg-types
295 ),
295 ),
296 )
296 )
297 )
297 )
298 bcache = None
298 bcache = None
299
299
300 finally:
300 finally:
301 if f:
301 if f:
302 f.close()
302 f.close()
303
303
304 return bcache
304 return bcache
305
305
306 def load(self, repo, lineiter):
306 def load(self, repo, lineiter):
307 """ fully loads the branchcache by reading from the file using the line
307 """ fully loads the branchcache by reading from the file using the line
308 iterator passed"""
308 iterator passed"""
309 for line in lineiter:
309 for line in lineiter:
310 line = line.rstrip(b'\n')
310 line = line.rstrip(b'\n')
311 if not line:
311 if not line:
312 continue
312 continue
313 node, state, label = line.split(b" ", 2)
313 node, state, label = line.split(b" ", 2)
314 if state not in b'oc':
314 if state not in b'oc':
315 raise ValueError('invalid branch state')
315 raise ValueError('invalid branch state')
316 label = encoding.tolocal(label.strip())
316 label = encoding.tolocal(label.strip())
317 node = bin(node)
317 node = bin(node)
318 self._entries.setdefault(label, []).append(node)
318 self._entries.setdefault(label, []).append(node)
319 if state == b'c':
319 if state == b'c':
320 self._closednodes.add(node)
320 self._closednodes.add(node)
321
321
322 @staticmethod
322 @staticmethod
323 def _filename(repo):
323 def _filename(repo):
324 """name of a branchcache file for a given repo or repoview"""
324 """name of a branchcache file for a given repo or repoview"""
325 filename = b"branch2"
325 filename = b"branch2"
326 if repo.filtername:
326 if repo.filtername:
327 filename = b'%s-%s' % (filename, repo.filtername)
327 filename = b'%s-%s' % (filename, repo.filtername)
328 return filename
328 return filename
329
329
330 def validfor(self, repo):
330 def validfor(self, repo):
331 """Is the cache content valid regarding a repo
331 """Is the cache content valid regarding a repo
332
332
333 - False when cached tipnode is unknown or if we detect a strip.
333 - False when cached tipnode is unknown or if we detect a strip.
334 - True when cache is up to date or a subset of current repo."""
334 - True when cache is up to date or a subset of current repo."""
335 try:
335 try:
336 return (self.tipnode == repo.changelog.node(self.tiprev)) and (
336 return (self.tipnode == repo.changelog.node(self.tiprev)) and (
337 self.filteredhash == scmutil.filteredhash(repo, self.tiprev)
337 self.filteredhash == scmutil.filteredhash(repo, self.tiprev)
338 )
338 )
339 except IndexError:
339 except IndexError:
340 return False
340 return False
341
341
342 def _branchtip(self, heads):
342 def _branchtip(self, heads):
343 '''Return tuple with last open head in heads and false,
343 '''Return tuple with last open head in heads and false,
344 otherwise return last closed head and true.'''
344 otherwise return last closed head and true.'''
345 tip = heads[-1]
345 tip = heads[-1]
346 closed = True
346 closed = True
347 for h in reversed(heads):
347 for h in reversed(heads):
348 if h not in self._closednodes:
348 if h not in self._closednodes:
349 tip = h
349 tip = h
350 closed = False
350 closed = False
351 break
351 break
352 return tip, closed
352 return tip, closed
353
353
354 def branchtip(self, branch):
354 def branchtip(self, branch):
355 '''Return the tipmost open head on branch head, otherwise return the
355 '''Return the tipmost open head on branch head, otherwise return the
356 tipmost closed head on branch.
356 tipmost closed head on branch.
357 Raise KeyError for unknown branch.'''
357 Raise KeyError for unknown branch.'''
358 return self._branchtip(self[branch])[0]
358 return self._branchtip(self[branch])[0]
359
359
360 def iteropen(self, nodes):
360 def iteropen(self, nodes):
361 return (n for n in nodes if n not in self._closednodes)
361 return (n for n in nodes if n not in self._closednodes)
362
362
363 def branchheads(self, branch, closed=False):
363 def branchheads(self, branch, closed=False):
364 self._verifybranch(branch)
364 self._verifybranch(branch)
365 heads = self._entries[branch]
365 heads = self._entries[branch]
366 if not closed:
366 if not closed:
367 heads = list(self.iteropen(heads))
367 heads = list(self.iteropen(heads))
368 return heads
368 return heads
369
369
370 def iterbranches(self):
370 def iterbranches(self):
371 for bn, heads in pycompat.iteritems(self):
371 for bn, heads in pycompat.iteritems(self):
372 yield (bn, heads) + self._branchtip(heads)
372 yield (bn, heads) + self._branchtip(heads)
373
373
374 def iterheads(self):
374 def iterheads(self):
375 """ returns all the heads """
375 """ returns all the heads """
376 self._verifyall()
376 self._verifyall()
377 return pycompat.itervalues(self._entries)
377 return pycompat.itervalues(self._entries)
378
378
379 def copy(self):
379 def copy(self):
380 """return an deep copy of the branchcache object"""
380 """return an deep copy of the branchcache object"""
381 return type(self)(
381 return type(self)(
382 self._entries,
382 self._entries,
383 self.tipnode,
383 self.tipnode,
384 self.tiprev,
384 self.tiprev,
385 self.filteredhash,
385 self.filteredhash,
386 self._closednodes,
386 self._closednodes,
387 )
387 )
388
388
389 def write(self, repo):
389 def write(self, repo):
390 try:
390 try:
391 f = repo.cachevfs(self._filename(repo), b"w", atomictemp=True)
391 f = repo.cachevfs(self._filename(repo), b"w", atomictemp=True)
392 cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
392 cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
393 if self.filteredhash is not None:
393 if self.filteredhash is not None:
394 cachekey.append(hex(self.filteredhash))
394 cachekey.append(hex(self.filteredhash))
395 f.write(b" ".join(cachekey) + b'\n')
395 f.write(b" ".join(cachekey) + b'\n')
396 nodecount = 0
396 nodecount = 0
397 for label, nodes in sorted(pycompat.iteritems(self._entries)):
397 for label, nodes in sorted(pycompat.iteritems(self._entries)):
398 label = encoding.fromlocal(label)
398 label = encoding.fromlocal(label)
399 for node in nodes:
399 for node in nodes:
400 nodecount += 1
400 nodecount += 1
401 if node in self._closednodes:
401 if node in self._closednodes:
402 state = b'c'
402 state = b'c'
403 else:
403 else:
404 state = b'o'
404 state = b'o'
405 f.write(b"%s %s %s\n" % (hex(node), state, label))
405 f.write(b"%s %s %s\n" % (hex(node), state, label))
406 f.close()
406 f.close()
407 repo.ui.log(
407 repo.ui.log(
408 b'branchcache',
408 b'branchcache',
409 b'wrote %s with %d labels and %d nodes\n',
409 b'wrote %s with %d labels and %d nodes\n',
410 _branchcachedesc(repo),
410 _branchcachedesc(repo),
411 len(self._entries),
411 len(self._entries),
412 nodecount,
412 nodecount,
413 )
413 )
414 except (IOError, OSError, error.Abort) as inst:
414 except (IOError, OSError, error.Abort) as inst:
415 # Abort may be raised by read only opener, so log and continue
415 # Abort may be raised by read only opener, so log and continue
416 repo.ui.debug(
416 repo.ui.debug(
417 b"couldn't write branch cache: %s\n"
417 b"couldn't write branch cache: %s\n"
418 % stringutil.forcebytestr(inst)
418 % stringutil.forcebytestr(inst)
419 )
419 )
420
420
421 def update(self, repo, revgen):
421 def update(self, repo, revgen):
422 """Given a branchhead cache, self, that may have extra nodes or be
422 """Given a branchhead cache, self, that may have extra nodes or be
423 missing heads, and a generator of nodes that are strictly a superset of
423 missing heads, and a generator of nodes that are strictly a superset of
424 heads missing, this function updates self to be correct.
424 heads missing, this function updates self to be correct.
425 """
425 """
426 starttime = util.timer()
426 starttime = util.timer()
427 cl = repo.changelog
427 cl = repo.changelog
428 # collect new branch entries
428 # collect new branch entries
429 newbranches = {}
429 newbranches = {}
430 getbranchinfo = repo.revbranchcache().branchinfo
430 getbranchinfo = repo.revbranchcache().branchinfo
431 for r in revgen:
431 for r in revgen:
432 branch, closesbranch = getbranchinfo(r)
432 branch, closesbranch = getbranchinfo(r)
433 newbranches.setdefault(branch, []).append(r)
433 newbranches.setdefault(branch, []).append(r)
434 if closesbranch:
434 if closesbranch:
435 self._closednodes.add(cl.node(r))
435 self._closednodes.add(cl.node(r))
436
436
437 # fetch current topological heads to speed up filtering
437 # fetch current topological heads to speed up filtering
438 topoheads = set(cl.headrevs())
438 topoheads = set(cl.headrevs())
439
439
440 # new tip revision which we found after iterating items from new
440 # new tip revision which we found after iterating items from new
441 # branches
441 # branches
442 ntiprev = self.tiprev
442 ntiprev = self.tiprev
443
443
444 # if older branchheads are reachable from new ones, they aren't
444 # if older branchheads are reachable from new ones, they aren't
445 # really branchheads. Note checking parents is insufficient:
445 # really branchheads. Note checking parents is insufficient:
446 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
446 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
447 for branch, newheadrevs in pycompat.iteritems(newbranches):
447 for branch, newheadrevs in pycompat.iteritems(newbranches):
448 bheads = self._entries.setdefault(branch, [])
448 bheads = self._entries.setdefault(branch, [])
449 bheadset = set(cl.rev(node) for node in bheads)
449 bheadset = set(cl.rev(node) for node in bheads)
450
450
451 # This have been tested True on all internal usage of this function.
451 # This have been tested True on all internal usage of this function.
452 # run it again in case of doubt
452 # run it again in case of doubt
453 # assert not (set(bheadrevs) & set(newheadrevs))
453 # assert not (set(bheadrevs) & set(newheadrevs))
454 bheadset.update(newheadrevs)
454 bheadset.update(newheadrevs)
455
455
456 # This prunes out two kinds of heads - heads that are superseded by
456 # This prunes out two kinds of heads - heads that are superseded by
457 # a head in newheadrevs, and newheadrevs that are not heads because
457 # a head in newheadrevs, and newheadrevs that are not heads because
458 # an existing head is their descendant.
458 # an existing head is their descendant.
459 uncertain = bheadset - topoheads
459 uncertain = bheadset - topoheads
460 if uncertain:
460 if uncertain:
461 floorrev = min(uncertain)
461 floorrev = min(uncertain)
462 ancestors = set(cl.ancestors(newheadrevs, floorrev))
462 ancestors = set(cl.ancestors(newheadrevs, floorrev))
463 bheadset -= ancestors
463 bheadset -= ancestors
464 bheadrevs = sorted(bheadset)
464 bheadrevs = sorted(bheadset)
465 self[branch] = [cl.node(rev) for rev in bheadrevs]
465 self[branch] = [cl.node(rev) for rev in bheadrevs]
466 tiprev = bheadrevs[-1]
466 tiprev = bheadrevs[-1]
467 if tiprev > ntiprev:
467 if tiprev > ntiprev:
468 ntiprev = tiprev
468 ntiprev = tiprev
469
469
470 if ntiprev > self.tiprev:
470 if ntiprev > self.tiprev:
471 self.tiprev = ntiprev
471 self.tiprev = ntiprev
472 self.tipnode = cl.node(ntiprev)
472 self.tipnode = cl.node(ntiprev)
473
473
474 if not self.validfor(repo):
474 if not self.validfor(repo):
475 # cache key are not valid anymore
475 # cache key are not valid anymore
476 self.tipnode = nullid
476 self.tipnode = nullid
477 self.tiprev = nullrev
477 self.tiprev = nullrev
478 for heads in self.iterheads():
478 for heads in self.iterheads():
479 tiprev = max(cl.rev(node) for node in heads)
479 tiprev = max(cl.rev(node) for node in heads)
480 if tiprev > self.tiprev:
480 if tiprev > self.tiprev:
481 self.tipnode = cl.node(tiprev)
481 self.tipnode = cl.node(tiprev)
482 self.tiprev = tiprev
482 self.tiprev = tiprev
483 self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
483 self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
484
484
485 duration = util.timer() - starttime
485 duration = util.timer() - starttime
486 repo.ui.log(
486 repo.ui.log(
487 b'branchcache',
487 b'branchcache',
488 b'updated %s in %.4f seconds\n',
488 b'updated %s in %.4f seconds\n',
489 _branchcachedesc(repo),
489 _branchcachedesc(repo),
490 duration,
490 duration,
491 )
491 )
492
492
493 self.write(repo)
493 self.write(repo)
494
494
495
495
496 class remotebranchcache(branchcache):
496 class remotebranchcache(branchcache):
497 """Branchmap info for a remote connection, should not write locally"""
497 """Branchmap info for a remote connection, should not write locally"""
498
498
499 def write(self, repo):
499 def write(self, repo):
500 pass
500 pass
501
501
502
502
503 # Revision branch info cache
503 # Revision branch info cache
504
504
505 _rbcversion = b'-v1'
505 _rbcversion = b'-v1'
506 _rbcnames = b'rbc-names' + _rbcversion
506 _rbcnames = b'rbc-names' + _rbcversion
507 _rbcrevs = b'rbc-revs' + _rbcversion
507 _rbcrevs = b'rbc-revs' + _rbcversion
508 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
508 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
509 _rbcrecfmt = b'>4sI'
509 _rbcrecfmt = b'>4sI'
510 _rbcrecsize = calcsize(_rbcrecfmt)
510 _rbcrecsize = calcsize(_rbcrecfmt)
511 _rbcnodelen = 4
511 _rbcnodelen = 4
512 _rbcbranchidxmask = 0x7FFFFFFF
512 _rbcbranchidxmask = 0x7FFFFFFF
513 _rbccloseflag = 0x80000000
513 _rbccloseflag = 0x80000000
514
514
515
515
516 class revbranchcache(object):
516 class revbranchcache(object):
517 """Persistent cache, mapping from revision number to branch name and close.
517 """Persistent cache, mapping from revision number to branch name and close.
518 This is a low level cache, independent of filtering.
518 This is a low level cache, independent of filtering.
519
519
520 Branch names are stored in rbc-names in internal encoding separated by 0.
520 Branch names are stored in rbc-names in internal encoding separated by 0.
521 rbc-names is append-only, and each branch name is only stored once and will
521 rbc-names is append-only, and each branch name is only stored once and will
522 thus have a unique index.
522 thus have a unique index.
523
523
524 The branch info for each revision is stored in rbc-revs as constant size
524 The branch info for each revision is stored in rbc-revs as constant size
525 records. The whole file is read into memory, but it is only 'parsed' on
525 records. The whole file is read into memory, but it is only 'parsed' on
526 demand. The file is usually append-only but will be truncated if repo
526 demand. The file is usually append-only but will be truncated if repo
527 modification is detected.
527 modification is detected.
528 The record for each revision contains the first 4 bytes of the
528 The record for each revision contains the first 4 bytes of the
529 corresponding node hash, and the record is only used if it still matches.
529 corresponding node hash, and the record is only used if it still matches.
530 Even a completely trashed rbc-revs fill thus still give the right result
530 Even a completely trashed rbc-revs fill thus still give the right result
531 while converging towards full recovery ... assuming no incorrectly matching
531 while converging towards full recovery ... assuming no incorrectly matching
532 node hashes.
532 node hashes.
533 The record also contains 4 bytes where 31 bits contains the index of the
533 The record also contains 4 bytes where 31 bits contains the index of the
534 branch and the last bit indicate that it is a branch close commit.
534 branch and the last bit indicate that it is a branch close commit.
535 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
535 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
536 and will grow with it but be 1/8th of its size.
536 and will grow with it but be 1/8th of its size.
537 """
537 """
538
538
    def __init__(self, repo, readonly=True):
        """Load the on-disk caches (rbc-names and rbc-revs) into memory.

        repo must be unfiltered (asserted below).  With readonly=True, a
        missing or unreadable rbc-names file makes branchinfo() permanently
        fall back to the slow changelog path for this instance instead of
        trying to (re)populate the cache.
        """
        assert repo.filtername is None
        self._repo = repo
        self._names = []  # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0  # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata)  # for verification before writing
            if bndata:
                self._names = [
                    encoding.tolocal(bn) for bn in bndata.split(b'\0')
                ]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        # rbc-revs records are indices into _names, so without names the
        # revs file is meaningless and is not read at all.
        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                # best effort: an unreadable revs cache just means a cold start
                repo.ui.debug(
                    b"couldn't read revision branch cache: %s\n"
                    % stringutil.forcebytestr(inst)
                )
        # remember number of good records on disk
        self._rbcrevslen = min(
            len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
        )
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names)  # number of names read at
        # _rbcsnameslen
574
574
575 def _clear(self):
575 def _clear(self):
576 self._rbcsnameslen = 0
576 self._rbcsnameslen = 0
577 del self._names[:]
577 del self._names[:]
578 self._rbcnamescount = 0
578 self._rbcnamescount = 0
579 self._rbcrevslen = len(self._repo.changelog)
579 self._rbcrevslen = len(self._repo.changelog)
580 self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
580 self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
581 util.clearcachedproperty(self, b'_namesreverse')
581 util.clearcachedproperty(self, b'_namesreverse')
582
582
583 @util.propertycache
583 @util.propertycache
584 def _namesreverse(self):
584 def _namesreverse(self):
585 return dict((b, r) for r, b in enumerate(self._names))
585 return dict((b, r) for r, b in enumerate(self._names))
586
586
    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize  # byte offset of rev's record

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx
        )
        # the top bit of the index field encodes "branch-closing commit"
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == b'\0\0\0\0':
            # all-zero record: never written; fall through to the slow path
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug(
                    b"referenced branch names not found"
                    b" - rebuilding revision branch cache from scratch\n"
                )
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug(
                b"history modification detected - truncating "
                b"revision branch cache to revision %d\n" % rev
            )
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            # NOTE(review): `truncate` is a byte offset but _rbcrevslen
            # counts records, so this min() rarely shrinks it.  The
            # _branchinfo() call below reaches _setcachedata(), which clamps
            # _rbcrevslen to `rev` and masks this — confirm intent.
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)
633
633
634 def _branchinfo(self, rev):
634 def _branchinfo(self, rev):
635 """Retrieve branch info from changelog and update _rbcrevs"""
635 """Retrieve branch info from changelog and update _rbcrevs"""
636 changelog = self._repo.changelog
636 changelog = self._repo.changelog
637 b, close = changelog.branchinfo(rev)
637 b, close = changelog.branchinfo(rev)
638 if b in self._namesreverse:
638 if b in self._namesreverse:
639 branchidx = self._namesreverse[b]
639 branchidx = self._namesreverse[b]
640 else:
640 else:
641 branchidx = len(self._names)
641 branchidx = len(self._names)
642 self._names.append(b)
642 self._names.append(b)
643 self._namesreverse[b] = branchidx
643 self._namesreverse[b] = branchidx
644 reponode = changelog.node(rev)
644 reponode = changelog.node(rev)
645 if close:
645 if close:
646 branchidx |= _rbccloseflag
646 branchidx |= _rbccloseflag
647 self._setcachedata(rev, reponode, branchidx)
647 self._setcachedata(rev, reponode, branchidx)
648 return b, close
648 return b, close
649
649
650 def setdata(self, branch, rev, node, close):
650 def setdata(self, branch, rev, node, close):
651 """add new data information to the cache"""
651 """add new data information to the cache"""
652 if branch in self._namesreverse:
652 if branch in self._namesreverse:
653 branchidx = self._namesreverse[branch]
653 branchidx = self._namesreverse[branch]
654 else:
654 else:
655 branchidx = len(self._names)
655 branchidx = len(self._names)
656 self._names.append(branch)
656 self._names.append(branch)
657 self._namesreverse[branch] = branchidx
657 self._namesreverse[branch] = branchidx
658 if close:
658 if close:
659 branchidx |= _rbccloseflag
659 branchidx |= _rbccloseflag
660 self._setcachedata(rev, node, branchidx)
660 self._setcachedata(rev, node, branchidx)
661 # If no cache data were readable (non exists, bad permission, etc)
661 # If no cache data were readable (non exists, bad permission, etc)
662 # the cache was bypassing itself by setting:
662 # the cache was bypassing itself by setting:
663 #
663 #
664 # self.branchinfo = self._branchinfo
664 # self.branchinfo = self._branchinfo
665 #
665 #
666 # Since we now have data in the cache, we need to drop this bypassing.
666 # Since we now have data in the cache, we need to drop this bypassing.
667 if 'branchinfo' in vars(self):
667 if 'branchinfo' in vars(self):
668 del self.branchinfo
668 del self.branchinfo
669
669
670 def _setcachedata(self, rev, node, branchidx):
670 def _setcachedata(self, rev, node, branchidx):
671 """Writes the node's branch data to the in-memory cache data."""
671 """Writes the node's branch data to the in-memory cache data."""
672 if rev == nullrev:
672 if rev == nullrev:
673 return
673 return
674 rbcrevidx = rev * _rbcrecsize
674 rbcrevidx = rev * _rbcrecsize
675 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
675 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
676 self._rbcrevs.extend(
676 self._rbcrevs.extend(
677 b'\0'
677 b'\0'
678 * (len(self._repo.changelog) * _rbcrecsize - len(self._rbcrevs))
678 * (len(self._repo.changelog) * _rbcrecsize - len(self._rbcrevs))
679 )
679 )
680 pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
680 pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
681 self._rbcrevslen = min(self._rbcrevslen, rev)
681 self._rbcrevslen = min(self._rbcrevslen, rev)
682
682
683 tr = self._repo.currenttransaction()
683 tr = self._repo.currenttransaction()
684 if tr:
684 if tr:
685 tr.addfinalize(b'write-revbranchcache', self.write)
685 tr.addfinalize(b'write-revbranchcache', self.write)
686
686
687 def write(self, tr=None):
687 def write(self, tr=None):
688 """Save branch cache if it is dirty."""
688 """Save branch cache if it is dirty."""
689 repo = self._repo
689 repo = self._repo
690 wlock = None
690 wlock = None
691 step = b''
691 step = b''
692 try:
692 try:
693 # write the new names
693 # write the new names
694 if self._rbcnamescount < len(self._names):
694 if self._rbcnamescount < len(self._names):
695 wlock = repo.wlock(wait=False)
695 wlock = repo.wlock(wait=False)
696 step = b' names'
696 step = b' names'
697 self._writenames(repo)
697 self._writenames(repo)
698
698
699 # write the new revs
699 # write the new revs
700 start = self._rbcrevslen * _rbcrecsize
700 start = self._rbcrevslen * _rbcrecsize
701 if start != len(self._rbcrevs):
701 if start != len(self._rbcrevs):
702 step = b''
702 step = b''
703 if wlock is None:
703 if wlock is None:
704 wlock = repo.wlock(wait=False)
704 wlock = repo.wlock(wait=False)
705 self._writerevs(repo, start)
705 self._writerevs(repo, start)
706
706
707 except (IOError, OSError, error.Abort, error.LockError) as inst:
707 except (IOError, OSError, error.Abort, error.LockError) as inst:
708 repo.ui.debug(
708 repo.ui.debug(
709 b"couldn't write revision branch cache%s: %s\n"
709 b"couldn't write revision branch cache%s: %s\n"
710 % (step, stringutil.forcebytestr(inst))
710 % (step, stringutil.forcebytestr(inst))
711 )
711 )
712 finally:
712 finally:
713 if wlock is not None:
713 if wlock is not None:
714 wlock.release()
714 wlock.release()
715
715
716 def _writenames(self, repo):
716 def _writenames(self, repo):
717 """ write the new branch names to revbranchcache """
717 """ write the new branch names to revbranchcache """
718 if self._rbcnamescount != 0:
718 if self._rbcnamescount != 0:
719 f = repo.cachevfs.open(_rbcnames, b'ab')
719 f = repo.cachevfs.open(_rbcnames, b'ab')
720 if f.tell() == self._rbcsnameslen:
720 if f.tell() == self._rbcsnameslen:
721 f.write(b'\0')
721 f.write(b'\0')
722 else:
722 else:
723 f.close()
723 f.close()
724 repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
724 repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
725 self._rbcnamescount = 0
725 self._rbcnamescount = 0
726 self._rbcrevslen = 0
726 self._rbcrevslen = 0
727 if self._rbcnamescount == 0:
727 if self._rbcnamescount == 0:
728 # before rewriting names, make sure references are removed
728 # before rewriting names, make sure references are removed
729 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
729 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
730 f = repo.cachevfs.open(_rbcnames, b'wb')
730 f = repo.cachevfs.open(_rbcnames, b'wb')
731 f.write(
731 f.write(
732 b'\0'.join(
732 b'\0'.join(
733 encoding.fromlocal(b)
733 encoding.fromlocal(b)
734 for b in self._names[self._rbcnamescount :]
734 for b in self._names[self._rbcnamescount :]
735 )
735 )
736 )
736 )
737 self._rbcsnameslen = f.tell()
737 self._rbcsnameslen = f.tell()
738 f.close()
738 f.close()
739 self._rbcnamescount = len(self._names)
739 self._rbcnamescount = len(self._names)
740
740
741 def _writerevs(self, repo, start):
741 def _writerevs(self, repo, start):
742 """ write the new revs to revbranchcache """
742 """ write the new revs to revbranchcache """
743 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
743 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
744 with repo.cachevfs.open(_rbcrevs, b'ab') as f:
744 with repo.cachevfs.open(_rbcrevs, b'ab') as f:
745 if f.tell() != start:
745 if f.tell() != start:
746 repo.ui.debug(
746 repo.ui.debug(
747 b"truncating cache/%s to %d\n" % (_rbcrevs, start)
747 b"truncating cache/%s to %d\n" % (_rbcrevs, start)
748 )
748 )
749 f.seek(start)
749 f.seek(start)
750 if f.tell() != start:
750 if f.tell() != start:
751 start = 0
751 start = 0
752 f.seek(start)
752 f.seek(start)
753 f.truncate()
753 f.truncate()
754 end = revs * _rbcrecsize
754 end = revs * _rbcrecsize
755 f.write(self._rbcrevs[start:end])
755 f.write(self._rbcrevs[start:end])
756 self._rbcrevslen = revs
756 self._rbcrevslen = revs
General Comments 0
You need to be logged in to leave comments. Login now