##// END OF EJS Templates
branchcache: introduce revbranchcache for caching of revision branch names...
Mads Kiilerich -
r23785:cb99bacb default
parent child Browse files
Show More
@@ -1,287 +1,436 b''
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev
8 from node import bin, hex, nullid, nullrev
9 import encoding
9 import encoding
10 import util
10 import util
11 import time
11 import time
12 from array import array
13 from struct import calcsize, pack, unpack
12
14
13 def _filename(repo):
15 def _filename(repo):
14 """name of a branchcache file for a given repo or repoview"""
16 """name of a branchcache file for a given repo or repoview"""
15 filename = "cache/branch2"
17 filename = "cache/branch2"
16 if repo.filtername:
18 if repo.filtername:
17 filename = '%s-%s' % (filename, repo.filtername)
19 filename = '%s-%s' % (filename, repo.filtername)
18 return filename
20 return filename
19
21
20 def read(repo):
22 def read(repo):
21 try:
23 try:
22 f = repo.opener(_filename(repo))
24 f = repo.opener(_filename(repo))
23 lines = f.read().split('\n')
25 lines = f.read().split('\n')
24 f.close()
26 f.close()
25 except (IOError, OSError):
27 except (IOError, OSError):
26 return None
28 return None
27
29
28 try:
30 try:
29 cachekey = lines.pop(0).split(" ", 2)
31 cachekey = lines.pop(0).split(" ", 2)
30 last, lrev = cachekey[:2]
32 last, lrev = cachekey[:2]
31 last, lrev = bin(last), int(lrev)
33 last, lrev = bin(last), int(lrev)
32 filteredhash = None
34 filteredhash = None
33 if len(cachekey) > 2:
35 if len(cachekey) > 2:
34 filteredhash = bin(cachekey[2])
36 filteredhash = bin(cachekey[2])
35 partial = branchcache(tipnode=last, tiprev=lrev,
37 partial = branchcache(tipnode=last, tiprev=lrev,
36 filteredhash=filteredhash)
38 filteredhash=filteredhash)
37 if not partial.validfor(repo):
39 if not partial.validfor(repo):
38 # invalidate the cache
40 # invalidate the cache
39 raise ValueError('tip differs')
41 raise ValueError('tip differs')
40 for l in lines:
42 for l in lines:
41 if not l:
43 if not l:
42 continue
44 continue
43 node, state, label = l.split(" ", 2)
45 node, state, label = l.split(" ", 2)
44 if state not in 'oc':
46 if state not in 'oc':
45 raise ValueError('invalid branch state')
47 raise ValueError('invalid branch state')
46 label = encoding.tolocal(label.strip())
48 label = encoding.tolocal(label.strip())
47 if not node in repo:
49 if not node in repo:
48 raise ValueError('node %s does not exist' % node)
50 raise ValueError('node %s does not exist' % node)
49 node = bin(node)
51 node = bin(node)
50 partial.setdefault(label, []).append(node)
52 partial.setdefault(label, []).append(node)
51 if state == 'c':
53 if state == 'c':
52 partial._closednodes.add(node)
54 partial._closednodes.add(node)
53 except KeyboardInterrupt:
55 except KeyboardInterrupt:
54 raise
56 raise
55 except Exception, inst:
57 except Exception, inst:
56 if repo.ui.debugflag:
58 if repo.ui.debugflag:
57 msg = 'invalid branchheads cache'
59 msg = 'invalid branchheads cache'
58 if repo.filtername is not None:
60 if repo.filtername is not None:
59 msg += ' (%s)' % repo.filtername
61 msg += ' (%s)' % repo.filtername
60 msg += ': %s\n'
62 msg += ': %s\n'
61 repo.ui.debug(msg % inst)
63 repo.ui.debug(msg % inst)
62 partial = None
64 partial = None
63 return partial
65 return partial
64
66
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# the ordering may be partial
subsettable = {
    None: 'visible',
    'visible': 'served',
    'served': 'immutable',
    'immutable': 'base',
}
75
77
def updatecache(repo):
    """Bring repo._branchcaches up to date for repo's current filter level.

    Reuses the in-memory cache when still valid, otherwise falls back to
    the on-disk cache, then to a copy of the nearest-subset filter's cache,
    and finally to an empty cache; any missing revisions are then applied
    and the result written back to disk.
    """
    cl = repo.changelog
    filtername = repo.filtername
    partial = repo._branchcaches.get(filtername)

    revs = []
    if partial is None or not partial.validfor(repo):
        partial = read(repo)
        if partial is None:
            subsetname = subsettable.get(filtername)
            if subsetname is None:
                partial = branchcache()
            else:
                # seed from the nearest subset's branchmap and replay the
                # revisions that subset filtered out but we can see
                subset = repo.filtered(subsetname)
                partial = subset.branchmap().copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= partial.tiprev)
    revs.extend(cl.revs(start=partial.tiprev + 1))
    if revs:
        partial.update(repo, revs)
        partial.write(repo)
    assert partial.validfor(repo), filtername
    repo._branchcaches[repo.filtername] = partial
99
101
100 class branchcache(dict):
102 class branchcache(dict):
101 """A dict like object that hold branches heads cache.
103 """A dict like object that hold branches heads cache.
102
104
103 This cache is used to avoid costly computations to determine all the
105 This cache is used to avoid costly computations to determine all the
104 branch heads of a repo.
106 branch heads of a repo.
105
107
106 The cache is serialized on disk in the following format:
108 The cache is serialized on disk in the following format:
107
109
108 <tip hex node> <tip rev number> [optional filtered repo hex hash]
110 <tip hex node> <tip rev number> [optional filtered repo hex hash]
109 <branch head hex node> <open/closed state> <branch name>
111 <branch head hex node> <open/closed state> <branch name>
110 <branch head hex node> <open/closed state> <branch name>
112 <branch head hex node> <open/closed state> <branch name>
111 ...
113 ...
112
114
113 The first line is used to check if the cache is still valid. If the
115 The first line is used to check if the cache is still valid. If the
114 branch cache is for a filtered repo view, an optional third hash is
116 branch cache is for a filtered repo view, an optional third hash is
115 included that hashes the hashes of all filtered revisions.
117 included that hashes the hashes of all filtered revisions.
116
118
117 The open/closed state is represented by a single letter 'o' or 'c'.
119 The open/closed state is represented by a single letter 'o' or 'c'.
118 This field can be used to avoid changelog reads when determining if a
120 This field can be used to avoid changelog reads when determining if a
119 branch head closes a branch or not.
121 branch head closes a branch or not.
120 """
122 """
121
123
122 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
124 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
123 filteredhash=None, closednodes=None):
125 filteredhash=None, closednodes=None):
124 super(branchcache, self).__init__(entries)
126 super(branchcache, self).__init__(entries)
125 self.tipnode = tipnode
127 self.tipnode = tipnode
126 self.tiprev = tiprev
128 self.tiprev = tiprev
127 self.filteredhash = filteredhash
129 self.filteredhash = filteredhash
128 # closednodes is a set of nodes that close their branch. If the branch
130 # closednodes is a set of nodes that close their branch. If the branch
129 # cache has been updated, it may contain nodes that are no longer
131 # cache has been updated, it may contain nodes that are no longer
130 # heads.
132 # heads.
131 if closednodes is None:
133 if closednodes is None:
132 self._closednodes = set()
134 self._closednodes = set()
133 else:
135 else:
134 self._closednodes = closednodes
136 self._closednodes = closednodes
135
137
136 def _hashfiltered(self, repo):
138 def _hashfiltered(self, repo):
137 """build hash of revision filtered in the current cache
139 """build hash of revision filtered in the current cache
138
140
139 Tracking tipnode and tiprev is not enough to ensure validity of the
141 Tracking tipnode and tiprev is not enough to ensure validity of the
140 cache as they do not help to distinct cache that ignored various
142 cache as they do not help to distinct cache that ignored various
141 revision bellow tiprev.
143 revision bellow tiprev.
142
144
143 To detect such difference, we build a cache of all ignored revisions.
145 To detect such difference, we build a cache of all ignored revisions.
144 """
146 """
145 cl = repo.changelog
147 cl = repo.changelog
146 if not cl.filteredrevs:
148 if not cl.filteredrevs:
147 return None
149 return None
148 key = None
150 key = None
149 revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
151 revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
150 if revs:
152 if revs:
151 s = util.sha1()
153 s = util.sha1()
152 for rev in revs:
154 for rev in revs:
153 s.update('%s;' % rev)
155 s.update('%s;' % rev)
154 key = s.digest()
156 key = s.digest()
155 return key
157 return key
156
158
157 def validfor(self, repo):
159 def validfor(self, repo):
158 """Is the cache content valid regarding a repo
160 """Is the cache content valid regarding a repo
159
161
160 - False when cached tipnode is unknown or if we detect a strip.
162 - False when cached tipnode is unknown or if we detect a strip.
161 - True when cache is up to date or a subset of current repo."""
163 - True when cache is up to date or a subset of current repo."""
162 try:
164 try:
163 return ((self.tipnode == repo.changelog.node(self.tiprev))
165 return ((self.tipnode == repo.changelog.node(self.tiprev))
164 and (self.filteredhash == self._hashfiltered(repo)))
166 and (self.filteredhash == self._hashfiltered(repo)))
165 except IndexError:
167 except IndexError:
166 return False
168 return False
167
169
168 def _branchtip(self, heads):
170 def _branchtip(self, heads):
169 '''Return tuple with last open head in heads and false,
171 '''Return tuple with last open head in heads and false,
170 otherwise return last closed head and true.'''
172 otherwise return last closed head and true.'''
171 tip = heads[-1]
173 tip = heads[-1]
172 closed = True
174 closed = True
173 for h in reversed(heads):
175 for h in reversed(heads):
174 if h not in self._closednodes:
176 if h not in self._closednodes:
175 tip = h
177 tip = h
176 closed = False
178 closed = False
177 break
179 break
178 return tip, closed
180 return tip, closed
179
181
180 def branchtip(self, branch):
182 def branchtip(self, branch):
181 '''Return the tipmost open head on branch head, otherwise return the
183 '''Return the tipmost open head on branch head, otherwise return the
182 tipmost closed head on branch.
184 tipmost closed head on branch.
183 Raise KeyError for unknown branch.'''
185 Raise KeyError for unknown branch.'''
184 return self._branchtip(self[branch])[0]
186 return self._branchtip(self[branch])[0]
185
187
186 def branchheads(self, branch, closed=False):
188 def branchheads(self, branch, closed=False):
187 heads = self[branch]
189 heads = self[branch]
188 if not closed:
190 if not closed:
189 heads = [h for h in heads if h not in self._closednodes]
191 heads = [h for h in heads if h not in self._closednodes]
190 return heads
192 return heads
191
193
192 def iterbranches(self):
194 def iterbranches(self):
193 for bn, heads in self.iteritems():
195 for bn, heads in self.iteritems():
194 yield (bn, heads) + self._branchtip(heads)
196 yield (bn, heads) + self._branchtip(heads)
195
197
196 def copy(self):
198 def copy(self):
197 """return an deep copy of the branchcache object"""
199 """return an deep copy of the branchcache object"""
198 return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
200 return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
199 self._closednodes)
201 self._closednodes)
200
202
201 def write(self, repo):
203 def write(self, repo):
202 try:
204 try:
203 f = repo.opener(_filename(repo), "w", atomictemp=True)
205 f = repo.opener(_filename(repo), "w", atomictemp=True)
204 cachekey = [hex(self.tipnode), str(self.tiprev)]
206 cachekey = [hex(self.tipnode), str(self.tiprev)]
205 if self.filteredhash is not None:
207 if self.filteredhash is not None:
206 cachekey.append(hex(self.filteredhash))
208 cachekey.append(hex(self.filteredhash))
207 f.write(" ".join(cachekey) + '\n')
209 f.write(" ".join(cachekey) + '\n')
208 nodecount = 0
210 nodecount = 0
209 for label, nodes in sorted(self.iteritems()):
211 for label, nodes in sorted(self.iteritems()):
210 for node in nodes:
212 for node in nodes:
211 nodecount += 1
213 nodecount += 1
212 if node in self._closednodes:
214 if node in self._closednodes:
213 state = 'c'
215 state = 'c'
214 else:
216 else:
215 state = 'o'
217 state = 'o'
216 f.write("%s %s %s\n" % (hex(node), state,
218 f.write("%s %s %s\n" % (hex(node), state,
217 encoding.fromlocal(label)))
219 encoding.fromlocal(label)))
218 f.close()
220 f.close()
219 repo.ui.log('branchcache',
221 repo.ui.log('branchcache',
220 'wrote %s branch cache with %d labels and %d nodes\n',
222 'wrote %s branch cache with %d labels and %d nodes\n',
221 repo.filtername, len(self), nodecount)
223 repo.filtername, len(self), nodecount)
222 except (IOError, OSError, util.Abort), inst:
224 except (IOError, OSError, util.Abort), inst:
223 repo.ui.debug("couldn't write branch cache: %s\n" % inst)
225 repo.ui.debug("couldn't write branch cache: %s\n" % inst)
224 # Abort may be raise by read only opener
226 # Abort may be raise by read only opener
225 pass
227 pass
226
228
227 def update(self, repo, revgen):
229 def update(self, repo, revgen):
228 """Given a branchhead cache, self, that may have extra nodes or be
230 """Given a branchhead cache, self, that may have extra nodes or be
229 missing heads, and a generator of nodes that are strictly a superset of
231 missing heads, and a generator of nodes that are strictly a superset of
230 heads missing, this function updates self to be correct.
232 heads missing, this function updates self to be correct.
231 """
233 """
232 starttime = time.time()
234 starttime = time.time()
233 cl = repo.changelog
235 cl = repo.changelog
234 # collect new branch entries
236 # collect new branch entries
235 newbranches = {}
237 newbranches = {}
236 getbranchinfo = cl.branchinfo
238 getbranchinfo = cl.branchinfo
237 for r in revgen:
239 for r in revgen:
238 branch, closesbranch = getbranchinfo(r)
240 branch, closesbranch = getbranchinfo(r)
239 newbranches.setdefault(branch, []).append(r)
241 newbranches.setdefault(branch, []).append(r)
240 if closesbranch:
242 if closesbranch:
241 self._closednodes.add(cl.node(r))
243 self._closednodes.add(cl.node(r))
242
244
243 # fetch current topological heads to speed up filtering
245 # fetch current topological heads to speed up filtering
244 topoheads = set(cl.headrevs())
246 topoheads = set(cl.headrevs())
245
247
246 # if older branchheads are reachable from new ones, they aren't
248 # if older branchheads are reachable from new ones, they aren't
247 # really branchheads. Note checking parents is insufficient:
249 # really branchheads. Note checking parents is insufficient:
248 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
250 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
249 for branch, newheadrevs in newbranches.iteritems():
251 for branch, newheadrevs in newbranches.iteritems():
250 bheads = self.setdefault(branch, [])
252 bheads = self.setdefault(branch, [])
251 bheadset = set(cl.rev(node) for node in bheads)
253 bheadset = set(cl.rev(node) for node in bheads)
252
254
253 # This have been tested True on all internal usage of this function.
255 # This have been tested True on all internal usage of this function.
254 # run it again in case of doubt
256 # run it again in case of doubt
255 # assert not (set(bheadrevs) & set(newheadrevs))
257 # assert not (set(bheadrevs) & set(newheadrevs))
256 newheadrevs.sort()
258 newheadrevs.sort()
257 bheadset.update(newheadrevs)
259 bheadset.update(newheadrevs)
258
260
259 # This prunes out two kinds of heads - heads that are superseded by
261 # This prunes out two kinds of heads - heads that are superseded by
260 # a head in newheadrevs, and newheadrevs that are not heads because
262 # a head in newheadrevs, and newheadrevs that are not heads because
261 # an existing head is their descendant.
263 # an existing head is their descendant.
262 uncertain = bheadset - topoheads
264 uncertain = bheadset - topoheads
263 if uncertain:
265 if uncertain:
264 floorrev = min(uncertain)
266 floorrev = min(uncertain)
265 ancestors = set(cl.ancestors(newheadrevs, floorrev))
267 ancestors = set(cl.ancestors(newheadrevs, floorrev))
266 bheadset -= ancestors
268 bheadset -= ancestors
267 bheadrevs = sorted(bheadset)
269 bheadrevs = sorted(bheadset)
268 self[branch] = [cl.node(rev) for rev in bheadrevs]
270 self[branch] = [cl.node(rev) for rev in bheadrevs]
269 tiprev = bheadrevs[-1]
271 tiprev = bheadrevs[-1]
270 if tiprev > self.tiprev:
272 if tiprev > self.tiprev:
271 self.tipnode = cl.node(tiprev)
273 self.tipnode = cl.node(tiprev)
272 self.tiprev = tiprev
274 self.tiprev = tiprev
273
275
274 if not self.validfor(repo):
276 if not self.validfor(repo):
275 # cache key are not valid anymore
277 # cache key are not valid anymore
276 self.tipnode = nullid
278 self.tipnode = nullid
277 self.tiprev = nullrev
279 self.tiprev = nullrev
278 for heads in self.values():
280 for heads in self.values():
279 tiprev = max(cl.rev(node) for node in heads)
281 tiprev = max(cl.rev(node) for node in heads)
280 if tiprev > self.tiprev:
282 if tiprev > self.tiprev:
281 self.tipnode = cl.node(tiprev)
283 self.tipnode = cl.node(tiprev)
282 self.tiprev = tiprev
284 self.tiprev = tiprev
283 self.filteredhash = self._hashfiltered(repo)
285 self.filteredhash = self._hashfiltered(repo)
284
286
285 duration = time.time() - starttime
287 duration = time.time() - starttime
286 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
288 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
287 repo.filtername, duration)
289 repo.filtername, duration)
290
291 # Revision branch info cache
292
293 _rbcversion = '-v1'
294 _rbcnames = 'cache/rbc-names' + _rbcversion
295 _rbcrevs = 'cache/rbc-revs' + _rbcversion
296 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
297 _rbcrecfmt = '>4sI'
298 _rbcrecsize = calcsize(_rbcrecfmt)
299 _rbcnodelen = 4
300 _rbcbranchidxmask = 0x7fffffff
301 _rbccloseflag = 0x80000000
302
303 class revbranchcache(object):
304 """Persistent cache, mapping from revision number to branch name and close.
305 This is a low level cache, independent of filtering.
306
307 Branch names are stored in rbc-names in internal encoding separated by 0.
308 rbc-names is append-only, and each branch name is only stored once and will
309 thus have a unique index.
310
311 The branch info for each revision is stored in rbc-revs as constant size
312 records. The whole file is read into memory, but it is only 'parsed' on
313 demand. The file is usually append-only but will be truncated if repo
314 modification is detected.
315 The record for each revision contains the first 4 bytes of the
316 corresponding node hash, and the record is only used if it still matches.
317 Even a completely trashed rbc-revs fill thus still give the right result
318 while converging towards full recovery ... assuming no incorrectly matching
319 node hashes.
320 The record also contains 4 bytes where 31 bits contains the index of the
321 branch and the last bit indicate that it is a branch close commit.
322 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
323 and will grow with it but be 1/8th of its size.
324 """
325
326 def __init__(self, repo):
327 assert repo.filtername is None
328 self._names = [] # branch names in local encoding with static index
329 self._rbcrevs = array('c') # structs of type _rbcrecfmt
330 self._rbcsnameslen = 0
331 try:
332 bndata = repo.vfs.read(_rbcnames)
333 self._rbcsnameslen = len(bndata) # for verification before writing
334 self._names = [encoding.tolocal(bn) for bn in bndata.split('\0')]
335 except (IOError, OSError), inst:
336 repo.ui.debug("couldn't read revision branch cache names: %s\n" %
337 inst)
338 if self._names:
339 try:
340 data = repo.vfs.read(_rbcrevs)
341 self._rbcrevs.fromstring(data)
342 except (IOError, OSError), inst:
343 repo.ui.debug("couldn't read revision branch cache: %s\n" %
344 inst)
345 # remember number of good records on disk
346 self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
347 len(repo.changelog))
348 if self._rbcrevslen == 0:
349 self._names = []
350 self._rbcnamescount = len(self._names) # number of good names on disk
351 self._namesreverse = dict((b, r) for r, b in enumerate(self._names))
352
353 def branchinfo(self, changelog, rev):
354 """Return branch name and close flag for rev, using and updating
355 persistent cache."""
356 rbcrevidx = rev * _rbcrecsize
357
358 # if requested rev is missing, add and populate all missing revs
359 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
360 first = len(self._rbcrevs) // _rbcrecsize
361 self._rbcrevs.extend('\0' * (len(changelog) * _rbcrecsize -
362 len(self._rbcrevs)))
363 for r in xrange(first, len(changelog)):
364 self._branchinfo(r)
365
366 # fast path: extract data from cache, use it if node is matching
367 reponode = changelog.node(rev)[:_rbcnodelen]
368 cachenode, branchidx = unpack(
369 _rbcrecfmt, buffer(self._rbcrevs, rbcrevidx, _rbcrecsize))
370 close = bool(branchidx & _rbccloseflag)
371 if close:
372 branchidx &= _rbcbranchidxmask
373 if cachenode == reponode:
374 return self._names[branchidx], close
375 # fall back to slow path and make sure it will be written to disk
376 self._rbcrevslen = min(self._rbcrevslen, rev)
377 return self._branchinfo(rev)
378
379 def _branchinfo(self, changelog, rev):
380 """Retrieve branch info from changelog and update _rbcrevs"""
381 b, close = changelog.branchinfo(rev)
382 if b in self._namesreverse:
383 branchidx = self._namesreverse[b]
384 else:
385 branchidx = len(self._names)
386 self._names.append(b)
387 self._namesreverse[b] = branchidx
388 reponode = changelog.node(rev)
389 if close:
390 branchidx |= _rbccloseflag
391 rbcrevidx = rev * _rbcrecsize
392 rec = array('c')
393 rec.fromstring(pack(_rbcrecfmt, reponode, branchidx))
394 self._rbcrevs[rbcrevidx:rbcrevidx + _rbcrecsize] = rec
395 return b, close
396
397 def write(self, repo):
398 """Save branch cache if it is dirty."""
399 if self._rbcnamescount < len(self._names):
400 try:
401 if self._rbcnamescount != 0:
402 f = repo.vfs.open(_rbcnames, 'ab')
403 if f.tell() == self._rbcsnameslen:
404 f.write('\0')
405 else:
406 f.close()
407 self._rbcnamescount = 0
408 self._rbcrevslen = 0
409 if self._rbcnamescount == 0:
410 f = repo.vfs.open(_rbcnames, 'wb')
411 f.write('\0'.join(encoding.fromlocal(b)
412 for b in self._names[self._rbcnamescount:]))
413 self._rbcsnameslen = f.tell()
414 f.close()
415 except (IOError, OSError, util.Abort), inst:
416 repo.ui.debug("couldn't write revision branch cache names: "
417 "%s\n" % inst)
418 return
419 self._rbcnamescount = len(self._names)
420
421 start = self._rbcrevslen * _rbcrecsize
422 if start != len(self._rbcrevs):
423 self._rbcrevslen = min(len(repo.changelog),
424 len(self._rbcrevs) // _rbcrecsize)
425 try:
426 f = repo.vfs.open(_rbcrevs, 'ab')
427 if f.tell() != start:
428 f.seek(start)
429 f.truncate()
430 end = self._rbcrevslen * _rbcrecsize
431 f.write(self._rbcrevs[start:end])
432 f.close()
433 except (IOError, OSError, util.Abort), inst:
434 repo.ui.debug("couldn't write revision branch cache: %s\n" %
435 inst)
436 return
General Comments 0
You need to be logged in to leave comments. Login now