##// END OF EJS Templates
branchmap: ignore Abort error while writing cache...
Pierre-Yves David -
r18214:cd4c7520 default
parent child Browse files
Show More
@@ -1,221 +1,222
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev
8 from node import bin, hex, nullid, nullrev
9 import encoding
9 import encoding
10 import util
10 import util
11
11
12 def _filename(repo):
12 def _filename(repo):
13 """name of a branchcache file for a given repo or repoview"""
13 """name of a branchcache file for a given repo or repoview"""
14 filename = "cache/branchheads"
14 filename = "cache/branchheads"
15 if repo.filtername:
15 if repo.filtername:
16 filename = '%s-%s' % (filename, repo.filtername)
16 filename = '%s-%s' % (filename, repo.filtername)
17 return filename
17 return filename
18
18
19 def read(repo):
19 def read(repo):
20 try:
20 try:
21 f = repo.opener(_filename(repo))
21 f = repo.opener(_filename(repo))
22 lines = f.read().split('\n')
22 lines = f.read().split('\n')
23 f.close()
23 f.close()
24 except (IOError, OSError):
24 except (IOError, OSError):
25 return None
25 return None
26
26
27 try:
27 try:
28 cachekey = lines.pop(0).split(" ", 2)
28 cachekey = lines.pop(0).split(" ", 2)
29 last, lrev = cachekey[:2]
29 last, lrev = cachekey[:2]
30 last, lrev = bin(last), int(lrev)
30 last, lrev = bin(last), int(lrev)
31 filteredhash = None
31 filteredhash = None
32 if len(cachekey) > 2:
32 if len(cachekey) > 2:
33 filteredhash = bin(cachekey[2])
33 filteredhash = bin(cachekey[2])
34 partial = branchcache(tipnode=last, tiprev=lrev,
34 partial = branchcache(tipnode=last, tiprev=lrev,
35 filteredhash=filteredhash)
35 filteredhash=filteredhash)
36 if not partial.validfor(repo):
36 if not partial.validfor(repo):
37 # invalidate the cache
37 # invalidate the cache
38 raise ValueError('tip differs')
38 raise ValueError('tip differs')
39 for l in lines:
39 for l in lines:
40 if not l:
40 if not l:
41 continue
41 continue
42 node, label = l.split(" ", 1)
42 node, label = l.split(" ", 1)
43 label = encoding.tolocal(label.strip())
43 label = encoding.tolocal(label.strip())
44 if not node in repo:
44 if not node in repo:
45 raise ValueError('node %s does not exist' % node)
45 raise ValueError('node %s does not exist' % node)
46 partial.setdefault(label, []).append(bin(node))
46 partial.setdefault(label, []).append(bin(node))
47 except KeyboardInterrupt:
47 except KeyboardInterrupt:
48 raise
48 raise
49 except Exception, inst:
49 except Exception, inst:
50 if repo.ui.debugflag:
50 if repo.ui.debugflag:
51 msg = 'invalid branchheads cache'
51 msg = 'invalid branchheads cache'
52 if repo.filtername is not None:
52 if repo.filtername is not None:
53 msg += ' (%s)' % repo.filtername
53 msg += ' (%s)' % repo.filtername
54 msg += ': %s\n'
54 msg += ': %s\n'
55 repo.ui.warn(msg % inst)
55 repo.ui.warn(msg % inst)
56 partial = None
56 partial = None
57 return partial
57 return partial
58
58
59
59
60
60
def updatecache(repo):
    """Ensure ``repo._branchcaches`` holds an up-to-date branch cache for
    the repository's current filter.

    Reuses the in-memory cache when still valid, otherwise falls back to
    the on-disk cache or a fresh one, then incrementally updates it.  Only
    the cacheable part is written back to disk.
    """
    cl = repo.changelog
    filtername = repo.filtername
    cache = repo._branchcaches.get(filtername)

    if cache is None or not cache.validfor(repo):
        cache = read(repo)
        if cache is None:
            cache = branchcache()

    cacheable = repo._cacheabletip()
    # cache.tiprev == cacheable: already up to date
    # cache.tiprev > cacheable: cache holds uncacheable revisions and must
    #                           not be written to disk
    if cache.tiprev < cacheable:
        newctxs = (repo[r] for r in cl.revs(cache.tiprev + 1, cacheable))
        cache.update(repo, newctxs)
        cache.write(repo)
    # Bring the in-memory copy the rest of the way to the actual tip; this
    # last stretch is not cacheable, so it is never written out.
    tiprev = cl.rev(cl.tip())
    if cache.tiprev < tiprev:
        newctxs = (repo[r] for r in cl.revs(cache.tiprev + 1, tiprev))
        cache.update(repo, newctxs)
    repo._branchcaches[repo.filtername] = cache
87
87
class branchcache(dict):
    """A dict like object that holds branch-heads cache.

    Maps branch name -> list of head nodes.  Additionally tracks the cache
    key (tipnode, tiprev, filteredhash) used to validate the cache against
    a repository state.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None):
        super(branchcache, self).__init__(entries)
        # node and rev of the highest revision covered by this cache
        self.tipnode = tipnode
        self.tiprev = tiprev
        # hash of the filtered revisions at cache-build time (or None)
        self.filteredhash = filteredhash

    def _hashfiltered(self, repo):
        """build hash of revision filtered in the current cache

        Tracking tipnode and tiprev is not enough to ensure validity of the
        cache as they do not help to distinguish caches that ignored various
        revisions below tiprev.

        To detect such difference, we build a cache of all ignored revisions.
        """
        cl = repo.changelog
        if not cl.filteredrevs:
            return None
        key = None
        revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
        if revs:
            s = util.sha1()
            for rev in revs:
                s.update('%s;' % rev)
            key = s.digest()
        return key

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash == self._hashfiltered(repo)))
        except IndexError:
            # tiprev no longer exists in the changelog (strip happened)
            return False


    def write(self, repo):
        """Write the cache to disk.

        Best effort only: any I/O failure is silently ignored since the
        cache can always be recomputed.
        """
        try:
            f = repo.opener(_filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), str(self.tiprev)]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            for label, nodes in self.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError, util.Abort):
            # Abort may be raised by a read-only opener
            pass

    def update(self, repo, ctxgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates self to be correct.
        """
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = self.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [cl.rev(node) for node in bheads
                         if cl.hasnode(node)]
            newheadrevs = [cl.rev(node) for node in newnodes
                           if cl.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(cl.ancestors([latest],
                                             bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = max(bheadrevs)
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        droppednodes = []
        for branch in self.keys():
            nodes = [head for head in self[branch]
                     if cl.hasnode(head)]
            if not nodes:
                # Every head of this branch was stripped: record the old
                # heads so the cache-key check below can notice a dropped
                # tipnode.  (Previously this extended with the empty
                # `nodes` list, a no-op that left `droppednodes` always
                # empty, so the `tipnode in droppednodes` test never fired.)
                droppednodes.extend(self[branch])
                del self[branch]
        if ((not self.validfor(repo)) or (self.tipnode in droppednodes)):

            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.values():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = self._hashfiltered(repo)
General Comments 0
You need to be logged in to leave comments. Login now