branchmap: use set for update code...
Pierre-Yves David
r20264:d9e1c167 default
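In short, this changeset makes branchcache.update() collect a branch's head revisions into a set and prune non-heads with a set difference, instead of keeping a sorted list that is repeatedly re-sorted and filtered. The sketch below is illustrative only, not code from the patch: the revision numbers are hypothetical and ancestors_of is a simplified stand-in for the changelog's ancestors call.

def prune_heads(bheadset, newheadrevs, ancestors_of):
    """Sketch of the set-based pruning done per branch by update().

    bheadset     -- set of previously known head revisions for one branch
    newheadrevs  -- sorted list of candidate new head revisions
    ancestors_of -- stand-in for cl.ancestors: returns the strict ancestors
                    of a revision that are >= the given floor revision
    """
    bheadset.update(newheadrevs)
    while newheadrevs:
        latest = newheadrevs.pop()
        if latest not in bheadset:
            continue  # already pruned as the ancestor of a newer head
        # anything 'latest' descends from is no longer a head
        bheadset -= set(ancestors_of(latest, min(bheadset)))
    return sorted(bheadset)

# Linear history 1 -> 2 -> 3: the new head 3 supersedes the old head 1.
linear = lambda rev, floor: range(floor, rev)
print(prune_heads({1}, [2, 3], linear))   # [3]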
@@ -1,275 +1,274 @@
# branchmap.py - logic to compute, maintain and store the branchmap for a local repo
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev
import encoding
import util

def _filename(repo):
    """name of a branchcache file for a given repo or repoview"""
    filename = "cache/branch2"
    if repo.filtername:
        filename = '%s-%s' % (filename, repo.filtername)
    return filename

def read(repo):
    try:
        f = repo.opener(_filename(repo))
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        return None

    try:
        cachekey = lines.pop(0).split(" ", 2)
        last, lrev = cachekey[:2]
        last, lrev = bin(last), int(lrev)
        filteredhash = None
        if len(cachekey) > 2:
            filteredhash = bin(cachekey[2])
        partial = branchcache(tipnode=last, tiprev=lrev,
                              filteredhash=filteredhash)
        if not partial.validfor(repo):
            # invalidate the cache
            raise ValueError('tip differs')
        for l in lines:
            if not l:
                continue
            node, state, label = l.split(" ", 2)
            if state not in 'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            if not node in repo:
                raise ValueError('node %s does not exist' % node)
            node = bin(node)
            partial.setdefault(label, []).append(node)
            if state == 'c':
                partial._closednodes.add(node)
    except KeyboardInterrupt:
        raise
    except Exception, inst:
        if repo.ui.debugflag:
            msg = 'invalid branchheads cache'
            if repo.filtername is not None:
                msg += ' (%s)' % repo.filtername
            msg += ': %s\n'
            repo.ui.warn(msg % inst)
        partial = None
    return partial


### Nearest subset relation
# The nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
subsettable = {None: 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}

def updatecache(repo):
    cl = repo.changelog
    filtername = repo.filtername
    partial = repo._branchcaches.get(filtername)

    revs = []
    if partial is None or not partial.validfor(repo):
        partial = read(repo)
        if partial is None:
            subsetname = subsettable.get(filtername)
            if subsetname is None:
                partial = branchcache()
            else:
                subset = repo.filtered(subsetname)
                partial = subset.branchmap().copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= partial.tiprev)
    revs.extend(cl.revs(start=partial.tiprev + 1))
    if revs:
        partial.update(repo, revs)
        partial.write(repo)
    assert partial.validfor(repo), filtername
    repo._branchcaches[repo.filtername] = partial

class branchcache(dict):
    """A dict-like object that holds the branch heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None):
        super(branchcache, self).__init__(entries)
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes

    def _hashfiltered(self, repo):
        """build a hash of the revisions filtered in the current cache

        Tracking tipnode and tiprev is not enough to ensure the validity of
        the cache, as they do not help distinguish caches that ignored
        different revisions below tiprev.

        To detect such a difference, we build a hash of all the ignored
        revisions.
        """
        cl = repo.changelog
        if not cl.filteredrevs:
            return None
        key = None
        revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
        if revs:
            s = util.sha1()
            for rev in revs:
                s.update('%s;' % rev)
            key = s.digest()
        return key

    def validfor(self, repo):
        """Is the cache content valid for a given repo?

        - False when the cached tipnode is unknown or if we detect a strip.
        - True when the cache is up to date or a subset of the current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash == self._hashfiltered(repo)))
        except IndexError:
            return False

    def _branchtip(self, heads):
        '''Return a tuple with the last open head in heads and False,
        otherwise return the last closed head and True.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on the branch, otherwise return the
        tipmost closed head on the branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def branchheads(self, branch, closed=False):
        heads = self[branch]
        if not closed:
            heads = [h for h in heads if h not in self._closednodes]
        return heads

    def iterbranches(self):
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def copy(self):
        """return a deep copy of the branchcache object"""
        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
                           self._closednodes)

    def write(self, repo):
        try:
            f = repo.opener(_filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), str(self.tiprev)]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            for label, nodes in sorted(self.iteritems()):
                for node in nodes:
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state,
                                            encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError, util.Abort):
            # Abort may be raised by a read-only opener
            pass

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of revisions that are strictly a
        superset of the missing heads, this function updates self to be
        correct.
        """
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = cl.branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note that checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self.setdefault(branch, [])
-            bheadrevs = [cl.rev(node) for node in bheads]
+            bheadset = set(cl.rev(node) for node in bheads)

            # This has been tested True on all internal usages of this
            # function. Run it again in case of doubt:
            # assert not (set(bheadrevs) & set(newheadrevs))
            newheadrevs.sort()
-            bheadrevs.extend(newheadrevs)
-            bheadrevs.sort()
+            bheadset.update(newheadrevs)

            # This loop prunes out two kinds of heads: heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are
            # not heads because an existing head is their descendant.
            while newheadrevs:
                latest = newheadrevs.pop()
-                if latest not in bheadrevs:
+                if latest not in bheadset:
                    continue
-                ancestors = set(cl.ancestors([latest], bheadrevs[0]))
-                if ancestors:
-                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
+                ancestors = set(cl.ancestors([latest], min(bheadset)))
+                bheadset -= ancestors
+            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # the cache key is not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.values():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = self._hashfiltered(repo)
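A side note on the "nearest subset" table near the top of the file: when updatecache() finds no usable cache for a filter level, it seeds the branchmap from the next level in subsettable, and computing that level's branchmap can in turn fall back one more level. A small standalone sketch (using only the table itself; the helper name is made up) of the chain this fallback can walk:

subsettable = {None: 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}

def fallbackchain(filtername):
    """Filter levels that successive updatecache() calls may fall back to."""
    chain = []
    name = filtername
    while name in subsettable:
        name = subsettable[name]
        chain.append(name)
    return chain

print(fallbackchain(None))      # ['visible', 'served', 'immutable', 'base']
print(fallbackchain('served'))  # ['immutable', 'base']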
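For orientation, here is a hypothetical cache/branch2 file in the serialization format documented in the branchcache docstring; the hashes are made up and shortened for readability, while real entries carry full 40-character hex nodes. The first line holds the tip node, the tip revision and, for filtered views, the optional filtered-revision hash; each following line is a branch head node, its open ('o') or closed ('c') state, and the branch name:

9f3a1c...e7 1042 4b8d02...91
77c5de...0a o default
51b2f4...c3 c stable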