##// END OF EJS Templates
branchmap: use a different file name for filtered view of repo
Pierre-Yves David -
r18187:4df8716d default
parent child Browse files
Show More
@@ -1,212 +1,215 b''
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev
8 from node import bin, hex, nullid, nullrev
9 import encoding
9 import encoding
10 import util
10 import util
11
11
12 def _filename(repo):
12 def _filename(repo):
13 """name of a branchcache file for a given repo"""
13 """name of a branchcache file for a given repo or repoview"""
14 return "cache/branchheads"
14 filename = "cache/branchheads"
15 if repo.filtername:
16 filename = '%s-%s' % (filename, repo.filtername)
17 return filename
15
18
def read(repo):
    """Read the on-disk branchheads cache and return a branchcache.

    Returns an empty branchcache when the file is missing or unreadable,
    or when its content is stale or corrupt for this repo.
    """
    try:
        f = repo.opener(_filename(repo))
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        # no cache file (or it cannot be opened): start from scratch
        return branchcache()

    try:
        # first line is the cache key:
        #   "<tipnode-hex> <tiprev> [<filteredhash-hex>]"
        cachekey = lines.pop(0).split(" ", 2)
        last, lrev = cachekey[:2]
        last, lrev = bin(last), int(lrev)
        filteredhash = None
        if len(cachekey) > 2:
            filteredhash = bin(cachekey[2])
        partial = branchcache(tipnode=last, tiprev=lrev,
                              filteredhash=filteredhash)
        if not partial.validfor(repo):
            # invalidate the cache
            raise ValueError('tip differs')
        # remaining lines each record one head: "<node-hex> <branch-label>"
        for l in lines:
            if not l:
                continue
            node, label = l.split(" ", 1)
            label = encoding.tolocal(label.strip())
            if not node in repo:
                raise ValueError('node %s does not exist' % node)
            partial.setdefault(label, []).append(bin(node))
    except KeyboardInterrupt:
        raise
    except Exception, inst:
        # any parse/validation error just means the cache is unusable;
        # fall back to an empty cache (it will be recomputed)
        if repo.ui.debugflag:
            repo.ui.warn(('invalid branchheads cache: %s\n') % inst)
        partial = branchcache()
    return partial
51
54
52
55
53
56
def updatecache(repo):
    """Bring repo._branchcache up to date with the changelog.

    The portion of the cache covering cacheable revisions is written to
    disk; anything above the cacheable tip is kept in memory only.
    """
    repo = repo.unfiltered() # Until we get a smarter cache management
    changelog = repo.changelog
    cache = repo._branchcache

    if cache is None or not cache.validfor(repo):
        # in-memory cache missing or stale: fall back to the on-disk one
        cache = read(repo)

    cacheabletip = repo._cacheabletip()
    # cache.tiprev == cacheabletip: cache is already up to date
    # cache.tiprev > cacheabletip: cache holds uncachable elements and
    # must not be written to disk
    if cache.tiprev < cacheabletip:
        revs = changelog.revs(cache.tiprev + 1, cacheabletip)
        cache.update(repo, (repo[r] for r in revs))
        cache.write(repo)
    # If the cacheable tip is lower than the actual tip, keep updating the
    # cache up to tip. This extra part (cacheable tip -> actual tip) is
    # never written to disk since it is not cacheable.
    tiprev = changelog.rev(changelog.tip())
    if cache.tiprev < tiprev:
        revs = changelog.revs(cache.tiprev + 1, tiprev)
        cache.update(repo, (repo[r] for r in revs))
    repo._branchcache = cache
78
81
class branchcache(dict):
    """A dict-like object that holds the branch heads cache.

    Maps branch name -> list of head nodes.  Also tracks the tip
    (tipnode/tiprev) the cache is valid up to, and a hash of the
    filtered revisions it was computed against.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None):
        super(branchcache, self).__init__(entries)
        # node and revision number of the tip this cache covers
        self.tipnode = tipnode
        self.tiprev = tiprev
        # digest of the filtered revisions at cache time (None if unfiltered)
        self.filteredhash = filteredhash

    def _hashfiltered(self, repo):
        """build hash of revisions filtered in the current cache

        Tracking tipnode and tiprev is not enough to ensure validity of the
        cache as they do not help distinguish caches that ignored various
        revisions below tiprev.

        To detect such differences, we build a hash of all ignored revisions.
        """
        cl = repo.changelog
        if not cl.filteredrevs:
            # unfiltered repo: no hash needed
            return None
        key = None
        # only revisions at or below tiprev can affect this cache's content
        revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
        if revs:
            s = util.sha1()
            for rev in revs:
                s.update('%s;' % rev)
            key = s.digest()
        return key

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode are unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash == self._hashfiltered(repo)))
        except IndexError:
            # tiprev no longer exists: the repo was stripped below the cache
            return False


    def write(self, repo):
        """Persist the cache to disk (best effort; I/O errors are ignored)."""
        try:
            f = repo.opener(_filename(repo), "w", atomictemp=True)
            # cache key line: "<tipnode-hex> <tiprev> [<filteredhash-hex>]"
            cachekey = [hex(self.tipnode), str(self.tiprev)]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            for label, nodes in self.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            # the cache is advisory only; failing to write it is harmless
            pass

    def update(self, repo, ctxgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates self to be correct.
        """
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = self.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [cl.rev(node) for node in bheads
                         if cl.hasnode(node)]
            newheadrevs = [cl.rev(node) for node in newnodes
                           if cl.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(cl.ancestors([latest],
                                             bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = max(bheadrevs)
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        droppednodes = []
        for branch in self.keys():
            nodes = [head for head in self[branch]
                     if cl.hasnode(head)]
            if not nodes:
                # NOTE(review): `nodes` is empty here, so this extend adds
                # nothing and droppednodes only ever stays empty via this
                # path -- looks like a latent quirk; confirm against upstream
                droppednodes.extend(nodes)
                del self[branch]
        if ((not self.validfor(repo)) or (self.tipnode in droppednodes)):

            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            # recompute the tip from the surviving heads
            for heads in self.values():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = self._hashfiltered(repo)
General Comments 0
You need to be logged in to leave comments. Login now