##// END OF EJS Templates
branchmap: remove superfluous pass statements
Augie Fackler -
r34369:d0db41af default
parent child Browse files
Show More
@@ -1,523 +1,522 b''
1 # branchmap.py - logic to compute, maintain and store branchmap for local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11
11
12 from .node import (
12 from .node import (
13 bin,
13 bin,
14 hex,
14 hex,
15 nullid,
15 nullid,
16 nullrev,
16 nullrev,
17 )
17 )
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 scmutil,
21 scmutil,
22 util,
22 util,
23 )
23 )
24
24
# Local aliases for the struct helpers; these are used by the fixed-size
# record format of the revision branch cache (see _rbcrecfmt below).
calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from
28
28
29 def _filename(repo):
29 def _filename(repo):
30 """name of a branchcache file for a given repo or repoview"""
30 """name of a branchcache file for a given repo or repoview"""
31 filename = "branch2"
31 filename = "branch2"
32 if repo.filtername:
32 if repo.filtername:
33 filename = '%s-%s' % (filename, repo.filtername)
33 filename = '%s-%s' % (filename, repo.filtername)
34 return filename
34 return filename
35
35
def read(repo):
    """Read the on-disk branch cache for *repo* and return a branchcache.

    Returns None when the cache file is missing/unreadable, or when its
    content is stale or corrupt (logged when debugging), so callers fall
    back to recomputing the branch map from scratch.
    """
    try:
        f = repo.cachevfs(_filename(repo))
        try:
            lines = f.read().split('\n')
        finally:
            # always release the file handle, even if read() fails;
            # previously a failing read() leaked the open file
            f.close()
    except (IOError, OSError):
        return None

    try:
        # first line: "<tip hex> <tip rev> [filtered hash]" cache key
        cachekey = lines.pop(0).split(" ", 2)
        last, lrev = cachekey[:2]
        last, lrev = bin(last), int(lrev)
        filteredhash = None
        if len(cachekey) > 2:
            filteredhash = bin(cachekey[2])
        partial = branchcache(tipnode=last, tiprev=lrev,
                              filteredhash=filteredhash)
        if not partial.validfor(repo):
            # invalidate the cache
            raise ValueError('tip differs')
        cl = repo.changelog
        # remaining lines: "<head hex> <o|c> <branch name>" records
        for l in lines:
            if not l:
                continue
            node, state, label = l.split(" ", 2)
            if state not in 'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            if not cl.hasnode(node):
                raise ValueError('node %s does not exist' % hex(node))
            partial.setdefault(label, []).append(node)
            if state == 'c':
                partial._closednodes.add(node)
    except Exception as inst:
        # any parse/validation problem invalidates the whole cache
        if repo.ui.debugflag:
            msg = 'invalid branchheads cache'
            if repo.filtername is not None:
                msg += ' (%s)' % repo.filtername
            msg += ': %s\n'
            repo.ui.debug(msg % inst)
        partial = None
    return partial
79
79
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# the ordering may be partial
subsettable = {None: 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}
90
90
def updatecache(repo):
    """Bring the in-memory branch cache for repo's current view up to date.

    Reuses the cached object when still valid, otherwise falls back to the
    on-disk cache, and as a last resort seeds from the nearest subset view
    (per subsettable) before updating with any missing revisions.
    """
    cl = repo.changelog
    filtername = repo.filtername
    cache = repo._branchcaches.get(filtername)

    missingrevs = []
    if cache is None or not cache.validfor(repo):
        cache = read(repo)
        if cache is None:
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                # seed from the nearest subset view rather than from scratch
                subset = repo.filtered(subsetname)
                cache = subset.branchmap().copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                missingrevs.extend(r for r in extrarevs
                                   if r <= cache.tiprev)
            else:
                cache = branchcache()
    missingrevs.extend(cl.revs(start=cache.tiprev + 1))
    if missingrevs:
        cache.update(repo, missingrevs)
        cache.write(repo)

    assert cache.validfor(repo), filtername
    repo._branchcaches[repo.filtername] = cache
115
115
def replacecache(repo, bm):
    """Replace the branchmap cache for a repo with a branch mapping.

    This is likely only called during clone with a branch map from a remote.
    """
    cl = repo.changelog
    allheads = []
    closednodes = []
    for heads in bm.itervalues():
        allheads.extend(heads)
        for head in heads:
            _branch, isclosed = cl.branchinfo(cl.rev(head))
            if isclosed:
                closednodes.append(head)

    if not allheads:
        # nothing to cache for an empty branch map
        return

    rtiprev = max(int(cl.rev(node)) for node in allheads)
    cache = branchcache(bm,
                        repo[rtiprev].node(),
                        rtiprev,
                        closednodes=closednodes)

    # Try to stick it as low as possible
    # filter above served are unlikely to be fetch from a clone
    for candidate in ('base', 'immutable', 'served'):
        rview = repo.filtered(candidate)
        if cache.validfor(rview):
            repo._branchcaches[candidate] = cache
            cache.write(rview)
            break
147
147
class branchcache(dict):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None):
        super(branchcache, self).__init__(entries)
        # cache key components: tip node/rev of the repo view this cache
        # was computed for, plus the optional filtered-revision hash
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash == \
                     scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            # tiprev no longer exists (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        # scan from the tip-most head backwards for the first open one
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        # yield only the nodes that do not close their branch
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        # heads of *branch*; closed heads are filtered out unless requested
        heads = self[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        # yields (branchname, heads, tipnode, isclosed) tuples
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def copy(self):
        """return an deep copy of the branchcache object"""
        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
                           self._closednodes)

    def write(self, repo):
        # Serialize the cache to disk in the format described in the class
        # docstring. Write failures are non-fatal: the cache is best effort.
        try:
            f = repo.cachevfs(_filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state,
                                            encoding.fromlocal(label)))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" % inst)

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            newheadrevs.sort()
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            # recompute the tip from the heads we still hold
            for heads in self.values():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername, duration)
319
318
# Revision branch info cache

_rbcversion = '-v1'
_rbcnames = 'rbc-names' + _rbcversion
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = '>4sI'
# size of one fixed-length rbc-revs record (8 bytes)
_rbcrecsize = calcsize(_rbcrecfmt)
# number of node-hash bytes stored per record for validation
_rbcnodelen = 4
# low 31 bits: index into the rbc-names list
_rbcbranchidxmask = 0x7fffffff
# high bit: the revision closes its branch
_rbccloseflag = 0x80000000
331
330
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs fill thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              inst)
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen
        # reverse mapping: branch name -> index in self._names
        self._namesreverse = dict((b, r) for r, b in enumerate(self._names))

    def _clear(self):
        # drop all in-memory cache state; it will be rebuilt lazily from
        # the changelog via _branchinfo
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._namesreverse.clear()
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # record was never written; fall through to the slow path
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first occurrence of this branch name: assign the next index
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the buffer with zero records up to the changelog length
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        self._rbcrevslen = min(self._rbcrevslen, rev)

        # schedule a write at transaction close so the update is persisted
        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            # first persist any new branch names appended since the last read
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # on-disk names changed under us - rewrite from scratch
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            # then append the dirty tail of the revision records
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        # seek failed (non-seekable?); rewrite everything
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best effort: failure to persist the cache is not fatal
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, inst))
        finally:
            if wlock is not None:
                wlock.release()
General Comments 0
You need to be logged in to leave comments. Login now