branchmap: drop `_cacheabletip` usage in `updatecache`...
Pierre-Yves David
r18218:d5655e74 default
@@ -1,222 +1,212 @@
# branchmap.py - logic to computes, maintain and stores branchmap for local repo
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev
import encoding
import util

def _filename(repo):
    """name of a branchcache file for a given repo or repoview"""
    filename = "cache/branchheads"
    if repo.filtername:
        filename = '%s-%s' % (filename, repo.filtername)
    return filename

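Editorial note, not part of the changeset: _filename() gives every repoview filter its own cache file. A minimal sketch of the resulting names, using a throwaway helper that mirrors the rule above ('visible' is just an example filter name):

    # Illustration only: a stand-in mirroring the naming rule in _filename().
    def cache_filename(filtername):
        filename = "cache/branchheads"
        if filtername:
            filename = '%s-%s' % (filename, filtername)
        return filename

    print(cache_filename(None))       # cache/branchheads
    print(cache_filename('visible'))  # cache/branchheads-visible
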
def read(repo):
    try:
        f = repo.opener(_filename(repo))
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        return None

    try:
        cachekey = lines.pop(0).split(" ", 2)
        last, lrev = cachekey[:2]
        last, lrev = bin(last), int(lrev)
        filteredhash = None
        if len(cachekey) > 2:
            filteredhash = bin(cachekey[2])
        partial = branchcache(tipnode=last, tiprev=lrev,
                              filteredhash=filteredhash)
        if not partial.validfor(repo):
            # invalidate the cache
            raise ValueError('tip differs')
        for l in lines:
            if not l:
                continue
            node, label = l.split(" ", 1)
            label = encoding.tolocal(label.strip())
            if not node in repo:
                raise ValueError('node %s does not exist' % node)
            partial.setdefault(label, []).append(bin(node))
    except KeyboardInterrupt:
        raise
    except Exception, inst:
        if repo.ui.debugflag:
            msg = 'invalid branchheads cache'
            if repo.filtername is not None:
                msg += ' (%s)' % repo.filtername
            msg += ': %s\n'
            repo.ui.warn(msg % inst)
        partial = None
    return partial


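Editorial note, not part of the changeset: read() parses the plain-text format that write() below produces -- a cache-key line (tip node in hex, tip rev, optional filtered-revision hash) followed by one "hexnode branchlabel" line per head. A rough sketch of composing such content, with fabricated placeholder values:

    # Fabricated values, for format illustration only.
    tipnode = 'aa' * 20                      # 40-char hex node of the cached tip
    tiprev = 42
    heads = {'default': ['bb' * 20], 'stable': ['cc' * 20]}

    lines = ['%s %d' % (tipnode, tiprev)]    # a third field would carry the filtered hash
    for label, nodes in heads.items():
        for node in nodes:
            lines.append('%s %s' % (node, label))
    print('\n'.join(lines))
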
def updatecache(repo):
    cl = repo.changelog
    filtername = repo.filtername
    partial = repo._branchcaches.get(filtername)

    if partial is None or not partial.validfor(repo):
        partial = read(repo)
        if partial is None:
            partial = branchcache()

-    catip = repo._cacheabletip()
-    # if partial.tiprev == catip: cache is already up to date
-    # if partial.tiprev > catip: we have uncachable element in `partial` can't
-    # write on disk
-    if partial.tiprev < catip:
-        ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, catip))
-        partial.update(repo, ctxgen)
-        partial.write(repo)
-    # If cacheable tip were lower than actual tip, we need to update the
-    # cache up to tip. This update (from cacheable to actual tip) is not
-    # written to disk since it's not cacheable.
-    tiprev = cl.rev(cl.tip())
-    if partial.tiprev < tiprev:
-        ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, tiprev))
-        partial.update(repo, ctxgen)
-    repo._branchcaches[repo.filtername] = partial
+    revs = list(cl.revs(start=partial.tiprev + 1))
+    if revs:
+        ctxgen = (repo[r] for r in revs)
+        partial.update(repo, ctxgen)
+        partial.write(repo)
+    repo._branchcaches[repo.filtername] = partial

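Editorial note on the hunk above: the removed lines split the update in two passes around repo._cacheabletip() and only wrote the first, "cacheable" part to disk; the new lines simply update the cache over every revision above its recorded tiprev and always write the result. A toy model of the new flow, using invented stand-ins rather than Mercurial's changelog and branchcache:

    # Toy stand-ins; not Mercurial APIs.
    class ToyCache(dict):
        def __init__(self):
            self.tiprev = -1
        def update(self, revs):
            for r in revs:
                self.setdefault('default', []).append(r)
                self.tiprev = max(self.tiprev, r)

    def toy_updatecache(cache, all_revs):
        revs = [r for r in all_revs if r > cache.tiprev]   # everything above the cached tip
        if revs:
            cache.update(revs)      # bring the cache up to the actual tip...
            # ...after which it is always safe to persist it, since nothing
            # "uncacheable" is left above the new tiprev.
        return cache

    cache = ToyCache()
    toy_updatecache(cache, range(5))
    print(cache.tiprev)   # 4
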
class branchcache(dict):
    """A dict like object that hold branches heads cache"""

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None):
        super(branchcache, self).__init__(entries)
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash

    def _hashfiltered(self, repo):
        """build hash of revision filtered in the current cache

        Tracking tipnode and tiprev is not enough to ensure validaty of the
        cache as they do not help to distinct cache that ignored various
        revision bellow tiprev.

        To detect such difference, we build a cache of all ignored revisions.
        """
        cl = repo.changelog
        if not cl.filteredrevs:
            return None
        key = None
        revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
        if revs:
            s = util.sha1()
            for rev in revs:
                s.update('%s;' % rev)
            key = s.digest()
        return key

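Editorial sketch of the key computed by _hashfiltered() above, using hashlib directly instead of util.sha1 (an assumed equivalent): it hashes the sorted filtered revisions at or below tiprev, so caches built with different sets of hidden revisions get different keys.

    import hashlib

    def filtered_key(filteredrevs, tiprev):
        # hash the filtered revisions the cache was built without
        revs = sorted(r for r in filteredrevs if r <= tiprev)
        if not revs:
            return None
        s = hashlib.sha1()
        for rev in revs:
            s.update(('%s;' % rev).encode('ascii'))
        return s.digest()

    print(filtered_key({3, 7, 12}, tiprev=10))   # digest over "3;7;" -- rev 12 is above tiprev
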
    def validfor(self, repo):
        """Is the cache content valide regarding a repo

        - False when cached tipnode are unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash == self._hashfiltered(repo)))
        except IndexError:
            return False


    def write(self, repo):
        try:
            f = repo.opener(_filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), str(self.tiprev)]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            for label, nodes in self.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError, util.Abort):
            # Abort may be raise by read only opener
            pass

    def update(self, repo, ctxgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates self to be correct.
        """
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = self.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [cl.rev(node) for node in bheads
                         if cl.hasnode(node)]
            newheadrevs = [cl.rev(node) for node in newnodes
                           if cl.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(cl.ancestors([latest],
                                             bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = max(bheadrevs)
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        droppednodes = []
        for branch in self.keys():
            nodes = [head for head in self[branch]
                     if cl.hasnode(head)]
            if not nodes:
                droppednodes.extend(nodes)
                del self[branch]
        if ((not self.validfor(repo)) or (self.tipnode in droppednodes)):

            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.values():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = self._hashfiltered(repo)
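
Editorial note on the pruning loop in update(): the comment "1 (branch a) -> 2 (branch b) -> 3 (branch a)" is the case where checking parents is not enough, because the superseded head 1 is an ancestor but not a parent of the new head 3. A self-contained toy of that situation, with invented helpers rather than changelog.ancestors():

    # Invented stand-ins, for illustration only.
    parents = {1: [], 2: [1], 3: [2]}            # linear history 1 -> 2 -> 3
    branch_of = {1: 'a', 2: 'b', 3: 'a'}

    def ancestors(rev):
        seen, stack = set(), list(parents[rev])
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(parents[r])
        return seen

    heads = {'a': [1], 'b': [2]}                 # cache state before rev 3 arrives
    new = 3
    candidates = set(heads[branch_of[new]]) | {new}
    # drop any candidate that is an ancestor of another candidate
    heads[branch_of[new]] = sorted(h for h in candidates
                                   if not any(h in ancestors(o) for o in candidates))
    print(heads)   # {'a': [3], 'b': [2]} -- rev 1 is pruned although it is not
                   # a direct parent of rev 3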