branchmap: remove silly line break...
Pierre-Yves David
r20260:586ec8fe default
@@ -1,290 +1,289 @@
# branchmap.py - logic to compute, maintain and store branchmap for local repo
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev
import encoding
import util

def _filename(repo):
    """name of a branchcache file for a given repo or repoview"""
    filename = "cache/branch2"
    if repo.filtername:
        filename = '%s-%s' % (filename, repo.filtername)
    return filename

def read(repo):
    try:
        f = repo.opener(_filename(repo))
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        return None

    try:
        cachekey = lines.pop(0).split(" ", 2)
        last, lrev = cachekey[:2]
        last, lrev = bin(last), int(lrev)
        filteredhash = None
        if len(cachekey) > 2:
            filteredhash = bin(cachekey[2])
        partial = branchcache(tipnode=last, tiprev=lrev,
                              filteredhash=filteredhash)
        if not partial.validfor(repo):
            # invalidate the cache
            raise ValueError('tip differs')
        for l in lines:
            if not l:
                continue
            node, state, label = l.split(" ", 2)
            if state not in 'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            if not node in repo:
                raise ValueError('node %s does not exist' % node)
            node = bin(node)
            partial.setdefault(label, []).append(node)
            if state == 'c':
                partial._closednodes.add(node)
    except KeyboardInterrupt:
        raise
    except Exception, inst:
        if repo.ui.debugflag:
            msg = 'invalid branchheads cache'
            if repo.filtername is not None:
                msg += ' (%s)' % repo.filtername
            msg += ': %s\n'
            repo.ui.warn(msg % inst)
        partial = None
    return partial


### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
subsettable = {None: 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}

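# Editor's note (illustrative, not part of branchmap.py): updatecache() below
# uses this table to seed a missing cache for a filtered view from the cache
# of its nearest subset, and then only feeds the leftover revisions to
# update(). For a hypothetical repoview filtered as 'visible', the lookup
# goes roughly like this:
#
#     subsetname = subsettable.get('visible')    # -> 'served'
#     subset = repo.filtered(subsetname)
#     partial = subset.branchmap().copy()        # start from the served cache
#     extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
#     # revisions hidden from 'served' but visible here, plus everything past
#     # partial.tiprev, are then passed to partial.update(repo, revs)
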
def updatecache(repo):
    cl = repo.changelog
    filtername = repo.filtername
    partial = repo._branchcaches.get(filtername)

    revs = []
    if partial is None or not partial.validfor(repo):
        partial = read(repo)
        if partial is None:
            subsetname = subsettable.get(filtername)
            if subsetname is None:
                partial = branchcache()
            else:
                subset = repo.filtered(subsetname)
                partial = subset.branchmap().copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= partial.tiprev)
    revs.extend(cl.revs(start=partial.tiprev + 1))
    if revs:
        partial.update(repo, revs)
        partial.write(repo)
    assert partial.validfor(repo), filtername
    repo._branchcaches[repo.filtername] = partial

class branchcache(dict):
    """A dict-like object that holds the branch heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

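    # Editor's note (illustrative, not part of branchmap.py): a cache file in
    # the format documented above might look like the following, with made-up,
    # shortened hashes standing in for real 40-character hex nodes:
    #
    #     e63e1bd... 1205 a946b0c...
    #     e63e1bd... o default
    #     0f1c2d3... o stable
    #     9a8b7c6... c old-feature
    #
    # The first line is the cache key: tip node, tip rev and, for a filtered
    # repo view, the hash of the filtered revisions. Each following line is
    # "<branch head node> <o|c> <branch name>".
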
    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None):
        super(branchcache, self).__init__(entries)
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes

    def _hashfiltered(self, repo):
        """build a hash of the revisions filtered in the current cache

        Tracking tipnode and tiprev is not enough to ensure validity of the
        cache as they do not help to distinguish caches that ignored various
        revisions below tiprev.

        To detect such differences, we build a cache of all ignored revisions.
        """
        cl = repo.changelog
        if not cl.filteredrevs:
            return None
        key = None
        revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
        if revs:
            s = util.sha1()
            for rev in revs:
                s.update('%s;' % rev)
            key = s.digest()
        return key

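    # Editor's note (a minimal sketch, not part of branchmap.py): assuming
    # revisions 3 and 5 are filtered and both are <= self.tiprev, the key
    # computed above is simply a SHA-1 over the string "3;5;":
    #
    #     s = util.sha1()
    #     for rev in [3, 5]:
    #         s.update('%s;' % rev)
    #     key = s.digest()        # 20 raw bytes; written out as hex by write()
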
    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash == self._hashfiltered(repo)))
        except IndexError:
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

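    # Editor's note (illustrative, not part of branchmap.py): with
    # heads = [n1, n2, n3] and only n3 in self._closednodes,
    # _branchtip(heads) walks the list from the end and returns (n2, False);
    # if every head were closed it would fall back to (n3, True).
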
    def branchtip(self, branch):
        '''Return the tipmost open head on branch, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def branchheads(self, branch, closed=False):
        heads = self[branch]
        if not closed:
            heads = [h for h in heads if h not in self._closednodes]
        return heads

    def iterbranches(self):
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def copy(self):
        """return a deep copy of the branchcache object"""
        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
                           self._closednodes)

    def write(self, repo):
        try:
            f = repo.opener(_filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), str(self.tiprev)]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            for label, nodes in sorted(self.iteritems()):
                for node in nodes:
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state,
                                            encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError, util.Abort):
            # Abort may be raised by a read-only opener
            pass

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates self to be correct.
        """
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = cl.branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            node = cl.node(r)
            newbranches.setdefault(branch, []).append(node)
            if closesbranch:
                self._closednodes.add(node)
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = self.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [cl.rev(node) for node in bheads
                         if cl.hasnode(node)]
            newheadrevs = [cl.rev(node) for node in newnodes
                           if cl.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
-                ancestors = set(cl.ancestors([latest],
-                                              bheadrevs[0]))
+                ancestors = set(cl.ancestors([latest], bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = max(bheadrevs)
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.values():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = self._hashfiltered(repo)
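
As a side note on the loop this commit touches: the head-pruning idea in
update() can be illustrated with a small standalone sketch. The toy DAG, the
parents dict and the ancestors() helper below are purely illustrative
stand-ins, not Mercurial APIs:

# Toy version of the pruning in branchcache.update(): a candidate head is
# dropped when another candidate can reach it through the ancestor relation.
parents = {1: [], 2: [1], 3: [2], 4: [2]}    # revisions 3 and 4 descend from 2

def ancestors(rev):
    seen, stack = set(), [rev]
    while stack:
        for p in parents[stack.pop()]:
            if p not in seen:
                seen.add(p)
                stack.append(p)
    return seen

heads = [2, 3, 4]
for latest in sorted(heads, reverse=True):
    if latest in heads:
        heads = [h for h in heads if h not in ancestors(latest)]
print(heads)    # [3, 4] -- revision 2 is pruned because 3 and 4 descend from it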