branchmap: add a copy method...
Pierre-Yves David
r18232:dd0b636b default
@@ -1,212 +1,215 @@
 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from node import bin, hex, nullid, nullrev
 import encoding
 import util

 def _filename(repo):
     """name of a branchcache file for a given repo or repoview"""
     filename = "cache/branchheads"
     if repo.filtername:
         filename = '%s-%s' % (filename, repo.filtername)
     return filename

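For a plain repository the cache lives at "cache/branchheads"; a filtered repoview gets its own file with the filter name appended. A tiny standalone restatement of that naming rule ('visible' below is just an example filter name, not something every repo defines):

# Hedged sketch of the names _filename() produces for different views.
def cachefilename(filtername):
    filename = "cache/branchheads"
    if filtername:
        filename = '%s-%s' % (filename, filtername)
    return filename

assert cachefilename(None) == "cache/branchheads"
assert cachefilename('visible') == "cache/branchheads-visible"
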
 def read(repo):
     try:
         f = repo.opener(_filename(repo))
         lines = f.read().split('\n')
         f.close()
     except (IOError, OSError):
         return None

     try:
         cachekey = lines.pop(0).split(" ", 2)
         last, lrev = cachekey[:2]
         last, lrev = bin(last), int(lrev)
         filteredhash = None
         if len(cachekey) > 2:
             filteredhash = bin(cachekey[2])
         partial = branchcache(tipnode=last, tiprev=lrev,
                               filteredhash=filteredhash)
         if not partial.validfor(repo):
             # invalidate the cache
             raise ValueError('tip differs')
         for l in lines:
             if not l:
                 continue
             node, label = l.split(" ", 1)
             label = encoding.tolocal(label.strip())
             if not node in repo:
                 raise ValueError('node %s does not exist' % node)
             partial.setdefault(label, []).append(bin(node))
     except KeyboardInterrupt:
         raise
     except Exception, inst:
         if repo.ui.debugflag:
             msg = 'invalid branchheads cache'
             if repo.filtername is not None:
                 msg += ' (%s)' % repo.filtername
             msg += ': %s\n'
             repo.ui.warn(msg % inst)
         partial = None
     return partial



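The file parsed here is plain text: the first line is the cache key, "<tip node hex> <tip rev>" with an optional third "<filtered hash hex>" field, and every other line is "<head node hex> <branch name>". A minimal standalone sketch of that parsing, using fabricated 40-character node strings instead of real changesets:

# Hedged illustration of the branchheads cache layout that read() expects.
tip = "1" * 40
other = "2" * 40
sample = "%s 42\n%s default\n%s stable\n" % (tip, tip, other)

lines = sample.split('\n')
cachekey = lines.pop(0).split(" ", 2)     # "<tipnode> <tiprev> [<filteredhash>]"
tipnode, tiprev = cachekey[0], int(cachekey[1])
filteredhash = cachekey[2] if len(cachekey) > 2 else None
heads = {}
for l in lines:
    if not l:
        continue
    node, label = l.split(" ", 1)         # "<node hex> <branch name>"
    heads.setdefault(label.strip(), []).append(node)
assert heads == {'default': [tip], 'stable': [other]}
assert tiprev == 42 and filteredhash is None
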
 def updatecache(repo):
     cl = repo.changelog
     filtername = repo.filtername
     partial = repo._branchcaches.get(filtername)

     if partial is None or not partial.validfor(repo):
         partial = read(repo)
         if partial is None:
             partial = branchcache()

     revs = list(cl.revs(start=partial.tiprev +1))
     if revs:
         ctxgen = (repo[r] for r in revs)
         partial.update(repo, ctxgen)
         partial.write(repo)
     repo._branchcaches[repo.filtername] = partial

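Because the cache key records the tip revision, updatecache() only feeds branchcache.update() the revisions that appeared after that tip, via cl.revs(start=partial.tiprev +1). A toy sketch of that incremental slice, with invented numbers:

# Toy sketch: a cache whose tip is rev 41 and a changelog holding revs 0..44
# means only revs 42..44 need to be examined on the next update.
cached_tiprev = 41
changelog_len = 45                                    # stand-in for the changelog length
revs = list(range(cached_tiprev + 1, changelog_len))  # mirrors cl.revs(start=tiprev + 1)
assert revs == [42, 43, 44]
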
 class branchcache(dict):
     """A dict like object that hold branches heads cache"""

     def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                  filteredhash=None):
         super(branchcache, self).__init__(entries)
         self.tipnode = tipnode
         self.tiprev = tiprev
         self.filteredhash = filteredhash

     def _hashfiltered(self, repo):
         """build hash of revision filtered in the current cache

         Tracking tipnode and tiprev is not enough to ensure validaty of the
         cache as they do not help to distinct cache that ignored various
         revision bellow tiprev.

         To detect such difference, we build a cache of all ignored revisions.
         """
         cl = repo.changelog
         if not cl.filteredrevs:
             return None
         key = None
         revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
         if revs:
             s = util.sha1()
             for rev in revs:
                 s.update('%s;' % rev)
             key = s.digest()
         return key

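The key hashes only the filtered revisions at or below the cached tip, so caches built under different filters get different keys even when tipnode and tiprev match. A standalone sketch of the same computation, substituting hashlib for util.sha1 (assumed here to be a thin wrapper around it) and using made-up revision numbers:

import hashlib

# Made-up filter state: revisions hidden by the current view, plus the cache tip.
filteredrevs = set([3, 7, 12, 40])
tiprev = 20

key = None
revs = sorted(r for r in filteredrevs if r <= tiprev)    # 40 is above the tip, ignored
if revs:
    s = hashlib.sha1()                                   # util.sha1 assumed equivalent
    for rev in revs:
        s.update(('%s;' % rev).encode('ascii'))          # same "<rev>;" feed as above
    key = s.digest()
assert key is not None and len(key) == 20
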
     def validfor(self, repo):
         """Is the cache content valide regarding a repo

         - False when cached tipnode are unknown or if we detect a strip.
         - True when cache is up to date or a subset of current repo."""
         try:
             return ((self.tipnode == repo.changelog.node(self.tiprev))
                     and (self.filteredhash == self._hashfiltered(repo)))
         except IndexError:
             return False

+    def copy(self):
+        """return an deep copy of the branchcache object"""
+        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash)

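The new copy() returns a branchcache that carries the same cache key fields (tipnode, tiprev, filteredhash) but owns its top-level branch-to-heads mapping, so one cache can gain or lose branch entries without disturbing the other. A minimal standalone sketch of that behaviour, re-declaring only the parts of the class exercised here and using fabricated nodes:

# Standalone sketch; nullid/nullrev stand in for mercurial.node and the heads
# are fabricated byte strings, not real changesets.
nullid = b"\x00" * 20
nullrev = -1

class branchcache(dict):
    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None):
        super(branchcache, self).__init__(entries)
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash

    def copy(self):
        # same body as the method added in this changeset
        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash)

orig = branchcache({'default': [b"\x11" * 20]}, tipnode=b"\x11" * 20, tiprev=5)
clone = orig.copy()
clone['stable'] = [b"\x22" * 20]      # a new branch entry only lands in the clone
assert 'stable' not in orig
assert clone.tiprev == orig.tiprev    # cache key fields are carried over
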
     def write(self, repo):
         try:
             f = repo.opener(_filename(repo), "w", atomictemp=True)
             cachekey = [hex(self.tipnode), str(self.tiprev)]
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
             f.write(" ".join(cachekey) + '\n')
             for label, nodes in self.iteritems():
                 for node in nodes:
                     f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
             f.close()
         except (IOError, OSError, util.Abort):
             # Abort may be raise by read only opener
             pass

     def update(self, repo, ctxgen):
         """Given a branchhead cache, self, that may have extra nodes or be
         missing heads, and a generator of nodes that are at least a superset of
         heads missing, this function updates self to be correct.
         """
         cl = repo.changelog
         # collect new branch entries
         newbranches = {}
         for c in ctxgen:
             newbranches.setdefault(c.branch(), []).append(c.node())
         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newnodes in newbranches.iteritems():
             bheads = self.setdefault(branch, [])
             # Remove candidate heads that no longer are in the repo (e.g., as
             # the result of a strip that just happened). Avoid using 'node in
             # self' here because that dives down into branchcache code somewhat
             # recursively.
             bheadrevs = [cl.rev(node) for node in bheads
                          if cl.hasnode(node)]
             newheadrevs = [cl.rev(node) for node in newnodes
                            if cl.hasnode(node)]
             ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
             # Remove duplicates - nodes that are in newheadrevs and are already
             # in bheadrevs. This can happen if you strip a node whose parent
             # was already a head (because they're on different branches).
             bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

             # Starting from tip means fewer passes over reachable. If we know
             # the new candidates are not ancestors of existing heads, we don't
             # have to examine ancestors of existing heads
             if ctxisnew:
                 iterrevs = sorted(newheadrevs)
             else:
                 iterrevs = list(bheadrevs)

             # This loop prunes out two kinds of heads - heads that are
             # superseded by a head in newheadrevs, and newheadrevs that are not
             # heads because an existing head is their descendant.
             while iterrevs:
                 latest = iterrevs.pop()
                 if latest not in bheadrevs:
                     continue
                 ancestors = set(cl.ancestors([latest],
                                              bheadrevs[0]))
                 if ancestors:
                     bheadrevs = [b for b in bheadrevs if b not in ancestors]
             self[branch] = [cl.node(rev) for rev in bheadrevs]
             tiprev = max(bheadrevs)
             if tiprev > self.tiprev:
                 self.tipnode = cl.node(tiprev)
                 self.tiprev = tiprev

         # There may be branches that cease to exist when the last commit in the
         # branch was stripped. This code filters them out. Note that the
         # branch that ceased to exist may not be in newbranches because
         # newbranches is the set of candidate heads, which when you strip the
         # last commit in a branch will be the parent branch.
         droppednodes = []
         for branch in self.keys():
             nodes = [head for head in self[branch]
                      if cl.hasnode(head)]
             if not nodes:
                 droppednodes.extend(nodes)
                 del self[branch]
         if ((not self.validfor(repo)) or (self.tipnode in droppednodes)):

             # cache key are not valid anymore
             self.tipnode = nullid
             self.tiprev = nullrev
             for heads in self.values():
                 tiprev = max(cl.rev(node) for node in heads)
                 if tiprev > self.tiprev:
                     self.tipnode = cl.node(tiprev)
                     self.tiprev = tiprev
         self.filteredhash = self._hashfiltered(repo)
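The "1 (branch a) -> 2 (branch b) -> 3 (branch a)" comment above is the case that forces the ancestor walk: once rev 3 exists, rev 1 is no longer a head of branch a, yet rev 1 is not rev 3's parent, so a parent check alone would miss it. A toy reproduction of the pruning loop on that three-revision history, with a hand-rolled ancestor walk standing in for cl.ancestors:

# Toy history: rev 1 (branch a) -> rev 2 (branch b) -> rev 3 (branch a).
parents = {1: [], 2: [1], 3: [2]}

def ancestors(rev):
    # every ancestor of rev (exclusive), walking the toy parent table
    seen, stack = set(), list(parents[rev])
    while stack:
        r = stack.pop()
        if r not in seen:
            seen.add(r)
            stack.extend(parents[r])
    return seen

# Candidate heads of branch 'a': the cached head (1) plus the new one (3).
bheadrevs = sorted({1, 3})
iterrevs = list(bheadrevs)
while iterrevs:
    latest = iterrevs.pop()                 # start from the highest candidate
    if latest not in bheadrevs:
        continue
    anc = ancestors(latest)
    if anc:
        bheadrevs = [b for b in bheadrevs if b not in anc]
assert bheadrevs == [3]                     # rev 1 was pruned via rev 2
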