##// END OF EJS Templates
branchmap: backout 6bf93440a717...
Matt Harbison -
r24052:32a64923 default
parent child Browse files
Show More
@@ -1,451 +1,445
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev
8 from node import bin, hex, nullid, nullrev
9 import encoding
9 import encoding
10 import util
10 import util
11 import time
11 import time
12 from array import array
12 from array import array
13 from struct import calcsize, pack, unpack
13 from struct import calcsize, pack, unpack
14
14
15 def _filename(repo):
15 def _filename(repo):
16 """name of a branchcache file for a given repo or repoview"""
16 """name of a branchcache file for a given repo or repoview"""
17 filename = "cache/branch2"
17 filename = "cache/branch2"
18 if repo.filtername:
18 if repo.filtername:
19 filename = '%s-%s' % (filename, repo.filtername)
19 filename = '%s-%s' % (filename, repo.filtername)
20 return filename
20 return filename
21
21
22 def read(repo):
22 def read(repo):
23 try:
23 try:
24 f = repo.vfs(_filename(repo))
24 f = repo.vfs(_filename(repo))
25 lines = f.read().split('\n')
25 lines = f.read().split('\n')
26 f.close()
26 f.close()
27 except (IOError, OSError):
27 except (IOError, OSError):
28 return None
28 return None
29
29
30 try:
30 try:
31 cachekey = lines.pop(0).split(" ", 2)
31 cachekey = lines.pop(0).split(" ", 2)
32 last, lrev = cachekey[:2]
32 last, lrev = cachekey[:2]
33 last, lrev = bin(last), int(lrev)
33 last, lrev = bin(last), int(lrev)
34 filteredhash = None
34 filteredhash = None
35 if len(cachekey) > 2:
35 if len(cachekey) > 2:
36 filteredhash = bin(cachekey[2])
36 filteredhash = bin(cachekey[2])
37 partial = branchcache(tipnode=last, tiprev=lrev,
37 partial = branchcache(tipnode=last, tiprev=lrev,
38 filteredhash=filteredhash)
38 filteredhash=filteredhash)
39 if not partial.validfor(repo):
39 if not partial.validfor(repo):
40 # invalidate the cache
40 # invalidate the cache
41 raise ValueError('tip differs')
41 raise ValueError('tip differs')
42 for l in lines:
42 for l in lines:
43 if not l:
43 if not l:
44 continue
44 continue
45 node, state, label = l.split(" ", 2)
45 node, state, label = l.split(" ", 2)
46 if state not in 'oc':
46 if state not in 'oc':
47 raise ValueError('invalid branch state')
47 raise ValueError('invalid branch state')
48 label = encoding.tolocal(label.strip())
48 label = encoding.tolocal(label.strip())
49 if not node in repo:
49 if not node in repo:
50 raise ValueError('node %s does not exist' % node)
50 raise ValueError('node %s does not exist' % node)
51 node = bin(node)
51 node = bin(node)
52 partial.setdefault(label, []).append(node)
52 partial.setdefault(label, []).append(node)
53 if state == 'c':
53 if state == 'c':
54 partial._closednodes.add(node)
54 partial._closednodes.add(node)
55 except KeyboardInterrupt:
55 except KeyboardInterrupt:
56 raise
56 raise
57 except Exception, inst:
57 except Exception, inst:
58 if repo.ui.debugflag:
58 if repo.ui.debugflag:
59 msg = 'invalid branchheads cache'
59 msg = 'invalid branchheads cache'
60 if repo.filtername is not None:
60 if repo.filtername is not None:
61 msg += ' (%s)' % repo.filtername
61 msg += ' (%s)' % repo.filtername
62 msg += ': %s\n'
62 msg += ': %s\n'
63 repo.ui.debug(msg % inst)
63 repo.ui.debug(msg % inst)
64 partial = None
64 partial = None
65 return partial
65 return partial
66
66
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
#
# Maps each filter name to the name of its nearest subset view; updatecache()
# uses this to seed a missing cache from the nearest subset's branchmap.
subsettable = {None: 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}
77
77
def updatecache(repo):
    """Bring repo._branchcaches up to date for repo's current filter.

    Tries, in order: the in-memory cache, the on-disk cache, and a copy of
    the nearest subset view's branchmap (per subsettable); whatever base is
    found is then updated with the revisions it is missing and written back
    to disk if anything changed.
    """
    cl = repo.changelog
    filtername = repo.filtername
    bcache = repo._branchcaches.get(filtername)

    revs = []
    if bcache is None or not bcache.validfor(repo):
        # memory cache unusable: fall back to disk, then to the nearest
        # subset view's cache
        bcache = read(repo)
        if bcache is None:
            subsetname = subsettable.get(filtername)
            if subsetname is None:
                bcache = branchcache()
            else:
                subset = repo.filtered(subsetname)
                bcache = subset.branchmap().copy()
                # revisions visible here but filtered in the subset still
                # need to be folded in
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
    revs.extend(cl.revs(start=bcache.tiprev + 1))
    if revs:
        bcache.update(repo, revs)
        bcache.write(repo)
    assert bcache.validfor(repo), filtername
    repo._branchcaches[repo.filtername] = bcache
101
101
class branchcache(dict):
    """A dict-like object that holds the branch heads cache.

    Maps branch name (in local encoding) -> list of head nodes. This cache
    is used to avoid costly computations to determine all the branch heads
    of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None):
        super(branchcache, self).__init__(entries)
        # node and rev of the tip covered by this cache
        self.tipnode = tipnode
        self.tiprev = tiprev
        # hash of the filtered revisions at cache time (see _hashfiltered)
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        # lazily created revbranchcache, see update()/write()
        self._revbranchcache = None

    def _hashfiltered(self, repo):
        """Build a hash of the revisions filtered in the current cache.

        Tracking tipnode and tiprev is not enough to ensure validity of the
        cache, as they do not distinguish caches that ignored different
        revisions below tiprev.

        To detect such differences, we build a hash of all ignored revisions.
        Returns None when nothing at or below tiprev is filtered.
        """
        cl = repo.changelog
        if not cl.filteredrevs:
            return None
        key = None
        revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
        if revs:
            s = util.sha1()
            for rev in revs:
                s.update('%s;' % rev)
            key = s.digest()
        return key

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash == self._hashfiltered(repo)))
        except IndexError:
            # tiprev no longer exists (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        # scan from the tipmost head backwards for the first open one
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def branchheads(self, branch, closed=False):
        # heads of `branch`; closed heads are filtered out unless requested.
        # Raises KeyError for unknown branch.
        heads = self[branch]
        if not closed:
            heads = [h for h in heads if h not in self._closednodes]
        return heads

    def iterbranches(self):
        # yields (branchname, heads, tipnode, isclosed) for every branch
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def copy(self):
        """return a deep copy of the branchcache object"""
        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
                           self._closednodes)

    def write(self, repo):
        # Serialize the cache to disk in the format documented on the class.
        # Failures are non-fatal: the cache is best-effort.
        try:
            f = repo.vfs(_filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), str(self.tiprev)]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state,
                                            encoding.fromlocal(label)))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self), nodecount)
        except (IOError, OSError, util.Abort), inst:
            repo.ui.debug("couldn't write branch cache: %s\n" % inst)
            # Abort may be raise by read only opener
            pass
        # flush the rev->branch cache populated during update(), always on
        # the unfiltered repo since it is filter-independent
        if self._revbranchcache:
            self._revbranchcache.write(repo.unfiltered())
            self._revbranchcache = None

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = time.time()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        # branch info is read via the unfiltered repo/changelog so the
        # revbranchcache stays filter-independent
        urepo = repo.unfiltered()
        self._revbranchcache = revbranchcache(urepo)
        getbranchinfo = self._revbranchcache.branchinfo
        ucl = urepo.changelog
        for r in revgen:
            branch, closesbranch = getbranchinfo(ucl, r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            newheadrevs.sort()
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            # track the overall tip as we go
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            # recompute the tip from the surviving heads
            for heads in self.values():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = self._hashfiltered(repo)

        duration = time.time() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername, duration)
297
297
# Revision branch info cache

# bump this when the on-disk record format changes
_rbcversion = '-v1'
# file holding the branch names, '\0'-separated, append-only
_rbcnames = 'cache/rbc-names' + _rbcversion
# file holding one fixed-size record per revision
_rbcrevs = 'cache/rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = '>4sI'
# size in bytes of one rbc-revs record
_rbcrecsize = calcsize(_rbcrecfmt)
# how many leading bytes of the node hash each record stores
_rbcnodelen = 4
# low 31 bits of the branch field: index into the rbc-names list
_rbcbranchidxmask = 0x7fffffff
# high bit of the branch field: set when the revision closes its branch
_rbccloseflag = 0x80000000
309
309
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs file will thus still give the right
    result while converging towards full recovery ... assuming no incorrectly
    matching node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo):
        # this cache is filter-independent: only built on unfiltered repos
        assert repo.filtername is None
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = array('c') # structs of type _rbcrecfmt
        self._rbcsnameslen = 0
        try:
            bndata = repo.vfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            self._names = [encoding.tolocal(bn) for bn in bndata.split('\0')]
        except (IOError, OSError), inst:
            # missing/unreadable cache is fine; we start empty
            repo.ui.debug("couldn't read revision branch cache names: %s\n" %
                          inst)
        if self._names:
            try:
                data = repo.vfs.read(_rbcrevs)
                self._rbcrevs.fromstring(data)
            except (IOError, OSError), inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              inst)
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of good names on disk
        # reverse map name -> index, kept in sync with self._names
        self._namesreverse = dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, changelog, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        rbcrevidx = rev * _rbcrecsize

        # if requested rev is missing, add and populate all missing revs
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            first = len(self._rbcrevs) // _rbcrecsize
            self._rbcrevs.extend('\0' * (len(changelog) * _rbcrecsize -
                                         len(self._rbcrevs)))
            for r in xrange(first, len(changelog)):
                self._branchinfo(changelog, r)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack(
            _rbcrecfmt, buffer(self._rbcrevs, rbcrevidx, _rbcrecsize))
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == reponode:
            return self._names[branchidx], close
        # fall back to slow path and make sure it will be written to disk
        self._rbcrevslen = min(self._rbcrevslen, rev)
        return self._branchinfo(changelog, rev)

    def _branchinfo(self, changelog, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time we see this branch: assign it the next index
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        rbcrevidx = rev * _rbcrecsize
        # overwrite the record for this rev in place
        rec = array('c')
        rec.fromstring(pack(_rbcrecfmt, reponode, branchidx))
        self._rbcrevs[rbcrevidx:rbcrevidx + _rbcrecsize] = rec
        return b, close

    def write(self, repo):
        """Save branch cache if it is dirty."""
        # step 1: append any new names to rbc-names, rewriting the whole
        # file if it changed on disk behind our back
        if self._rbcnamescount < len(self._names):
            try:
                if self._rbcnamescount != 0:
                    f = repo.vfs.open(_rbcnames, 'ab')
                    # The position after open(x, 'a') is implementation defined-
                    # see issue3543.  SEEK_END was added in 2.5
                    f.seek(0, 2) #os.SEEK_END
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # size mismatch: file changed on disk, start over
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    f = repo.vfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
            except (IOError, OSError, util.Abort), inst:
                repo.ui.debug("couldn't write revision branch cache names: "
                              "%s\n" % inst)
                return
            self._rbcnamescount = len(self._names)

        # step 2: append the new records to rbc-revs, truncating first if
        # the on-disk file does not end where our good records end
        start = self._rbcrevslen * _rbcrecsize
        if start != len(self._rbcrevs):
            revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
            try:
                f = repo.vfs.open(_rbcrevs, 'ab')
                # The position after open(x, 'a') is implementation defined-
                # see issue3543.  SEEK_END was added in 2.5
                f.seek(0, 2) #os.SEEK_END
                if f.tell() != start:
                    repo.ui.debug("truncating %s to %s\n" % (_rbcrevs, start))
                    f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
            except (IOError, OSError, util.Abort), inst:
                repo.ui.debug("couldn't write revision branch cache: %s\n" %
                              inst)
                return
            self._rbcrevslen = revs
General Comments 0
You need to be logged in to leave comments. Login now