branchmap: revert c34532365b38 for Python 2.7 compatibility...
Mike Hommey
r33737:1814ca41 stable
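
The single hunk below reverts c34532365b38 by re-wrapping the revision branch cache bytearray in util.buffer() before it is handed to struct.unpack_from(), presumably because unpack_from() rejects bytearray arguments on the affected Python 2.7 releases. A minimal sketch of that incompatibility, assuming such a release (illustrative only, not part of the change; rec is a placeholder buffer):

    import struct

    rec = bytearray(8)                         # one zero-filled '>4sI' record
    try:
        struct.unpack_from('>4sI', rec, 0)     # TypeError on affected releases
    except TypeError:
        # buffer() accepts the bytearray on Python 2; util.buffer resolves to
        # the built-in buffer there, which is what the restored code relies on.
        struct.unpack_from('>4sI', buffer(rec), 0)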
@@ -1,519 +1,520 @@
# branchmap.py - logic to compute, maintain and store the branchmap for a local repo
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct

from .node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from . import (
    encoding,
    error,
    scmutil,
    util,
)

calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from

def _filename(repo):
    """name of a branchcache file for a given repo or repoview"""
    filename = "branch2"
    if repo.filtername:
        filename = '%s-%s' % (filename, repo.filtername)
    return filename
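# Reviewer sketch, not part of the change: _filename() above yields 'branch2'
# for an unfiltered repo and e.g. 'branch2-served' or 'branch2-visible' for
# the repoview filters named in subsettable below.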

def read(repo):
    try:
        f = repo.cachevfs(_filename(repo))
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        return None

    try:
        cachekey = lines.pop(0).split(" ", 2)
        last, lrev = cachekey[:2]
        last, lrev = bin(last), int(lrev)
        filteredhash = None
        if len(cachekey) > 2:
            filteredhash = bin(cachekey[2])
        partial = branchcache(tipnode=last, tiprev=lrev,
                              filteredhash=filteredhash)
        if not partial.validfor(repo):
            # invalidate the cache
            raise ValueError('tip differs')
        cl = repo.changelog
        for l in lines:
            if not l:
                continue
            node, state, label = l.split(" ", 2)
            if state not in 'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            if not cl.hasnode(node):
                raise ValueError('node %s does not exist' % hex(node))
            partial.setdefault(label, []).append(node)
            if state == 'c':
                partial._closednodes.add(node)
    except Exception as inst:
        if repo.ui.debugflag:
            msg = 'invalid branchheads cache'
            if repo.filtername is not None:
                msg += ' (%s)' % repo.filtername
            msg += ': %s\n'
            repo.ui.debug(msg % inst)
        partial = None
    return partial

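# Reviewer sketch, not part of the change: the 'branch2' file parsed by read()
# above has the shape (placeholder values):
#
#   <tip-node-hex> <tip-rev> [<filtered-hash-hex>]
#   <head-node-hex> o default
#   <head-node-hex> c some-closed-branch
#
# 'o'/'c' is the open/closed state of each head; the branchcache docstring
# below is the authoritative description of this format.
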
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
subsettable = {None: 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}

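# Reviewer sketch, not part of the change: updatecache() below uses this table
# to seed a missing cache from its nearest subset, e.g. the 'visible' view
# starts from a copy of the 'served' view's branchmap and then folds in the
# revisions that only 'visible' exposes.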
def updatecache(repo):
    cl = repo.changelog
    filtername = repo.filtername
    partial = repo._branchcaches.get(filtername)

    revs = []
    if partial is None or not partial.validfor(repo):
        partial = read(repo)
        if partial is None:
            subsetname = subsettable.get(filtername)
            if subsetname is None:
                partial = branchcache()
            else:
                subset = repo.filtered(subsetname)
                partial = subset.branchmap().copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= partial.tiprev)
    revs.extend(cl.revs(start=partial.tiprev + 1))
    if revs:
        partial.update(repo, revs)
        partial.write(repo)

    assert partial.validfor(repo), filtername
    repo._branchcaches[repo.filtername] = partial

def replacecache(repo, bm):
    """Replace the branchmap cache for a repo with a branch mapping.

    This is likely only called during clone with a branch map from a remote.
    """
    rbheads = []
    closed = []
    for bheads in bm.itervalues():
        rbheads.extend(bheads)
        for h in bheads:
            r = repo.changelog.rev(h)
            b, c = repo.changelog.branchinfo(r)
            if c:
                closed.append(h)

    if rbheads:
        rtiprev = max((int(repo.changelog.rev(node))
                       for node in rbheads))
        cache = branchcache(bm,
                            repo[rtiprev].node(),
                            rtiprev,
                            closednodes=closed)

        # Try to stick it as low as possible
        # filters above served are unlikely to be fetched from a clone
        for candidate in ('base', 'immutable', 'served'):
            rview = repo.filtered(candidate)
            if cache.validfor(rview):
                repo._branchcaches[candidate] = cache
                cache.write(rview)
                break

class branchcache(dict):
    """A dict-like object that holds the branch heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None):
        super(branchcache, self).__init__(entries)
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash == \
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def branchheads(self, branch, closed=False):
        heads = self[branch]
        if not closed:
            heads = [h for h in heads if h not in self._closednodes]
        return heads

    def iterbranches(self):
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

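    # Usage sketch for the query methods above (reviewer note, not part of the
    # change; assumes a populated cache):
    #   cache.branchtip('default')                 # tipmost, preferring open heads
    #   cache.branchheads('default')               # open heads only
    #   cache.branchheads('default', closed=True)  # open and closed heads
    #   for name, heads, tip, isclosed in cache.iterbranches():
    #       ...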
    def copy(self):
        """return a deep copy of the branchcache object"""
        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
                           self._closednodes)

    def write(self, repo):
        try:
            f = repo.cachevfs(_filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state,
                                            encoding.fromlocal(label)))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            repo.ui.debug("couldn't write branch cache: %s\n" % inst)
            # Abort may be raised by a read-only opener
            pass

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This has been tested True on all internal usages of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            newheadrevs.sort()
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key is not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.values():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername, duration)

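# Worked example for the head pruning in branchcache.update() above (reviewer
# sketch, not part of the change): with history 1 (branch a) -> 2 (branch b)
# -> 3 (branch a), adding rev 3 to branch a gives bheadset = {1, 3} while the
# topological heads are just {3}, so rev 1 is "uncertain"; cl.ancestors([3],
# floorrev) with floorrev = 1 yields {1, 2}, rev 1 is pruned, and branch a
# keeps only head 3.
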
# Revision branch info cache

_rbcversion = '-v1'
_rbcnames = 'rbc-names' + _rbcversion
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating a closed branch]
_rbcrecfmt = '>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcnodelen = 4
_rbcbranchidxmask = 0x7fffffff
_rbccloseflag = 0x80000000
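
# Reviewer sketch, not part of the change: each rbc-revs record is therefore
# _rbcrecsize == calcsize('>4sI') == 8 bytes: a 4-byte node-hash prefix
# followed by a big-endian uint32 whose low 31 bits index into rbc-names and
# whose top bit (_rbccloseflag) marks a branch-closing commit, e.g.
#   pack_into('>4sI', buf, 0, node[:4], idx | (_rbccloseflag if closed else 0))
# where buf, node, idx and closed are placeholders.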

class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs file will thus still give the right
    result while converging towards full recovery ... assuming no incorrectly
    matching node hashes.
    The record also contains 4 bytes where 31 bits contain the index of the
    branch and the last bit indicates that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              inst)
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen
        self._namesreverse = dict((b, r) for r, b in enumerate(self._names))

    def _clear(self):
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._namesreverse.clear()
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
-        cachenode, branchidx = unpack_from(_rbcrecfmt, self._rbcrevs, rbcrevidx)
+        cachenode, branchidx = unpack_from(
+            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, inst))
        finally:
            if wlock is not None:
                wlock.release()
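
For context (reviewer note; none of this is part of the diff, and node below is a placeholder): branchcache.update() resolves each new revision's branch through revbranchcache.branchinfo(), which falls back to the changelog on a cache miss and schedules a write at transaction close via tr.addfinalize. A minimal sketch against an in-process repo object:

    rbc = repo.revbranchcache()
    branch, closed = rbc.branchinfo(repo.changelog.rev(node))
    rbc.write()   # normally deferred to tr.addfinalize('write-revbranchcache', ...)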