@@ -1,1274 +1,1277 @@
1 | # branchmap.py - logic to compute, maintain and store the branchmap for a local repo |

1 | # branchmap.py - logic to compute, maintain and store the branchmap for a local repo | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> |
|
3 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 |
|
8 | |||
9 | import struct |
|
9 | import struct | |
10 |
|
10 | |||
11 | from .node import ( |
|
11 | from .node import ( | |
12 | bin, |
|
12 | bin, | |
13 | hex, |
|
13 | hex, | |
14 | nullrev, |
|
14 | nullrev, | |
15 | ) |
|
15 | ) | |
16 |
|
16 | |||
17 | from typing import ( |
|
17 | from typing import ( | |
18 | Any, |
|
18 | Any, | |
19 | Callable, |
|
19 | Callable, | |
20 | Dict, |
|
20 | Dict, | |
21 | Iterable, |
|
21 | Iterable, | |
22 | List, |
|
22 | List, | |
23 | Optional, |
|
23 | Optional, | |
24 | Set, |
|
24 | Set, | |
25 | TYPE_CHECKING, |
|
25 | TYPE_CHECKING, | |
26 | Tuple, |
|
26 | Tuple, | |
27 | Union, |
|
27 | Union, | |
28 | cast, |
|
28 | cast, | |
29 | ) |
|
29 | ) | |
30 |
|
30 | |||
31 | from . import ( |
|
31 | from . import ( | |
32 | encoding, |
|
32 | encoding, | |
33 | error, |
|
33 | error, | |
34 | obsolete, |
|
34 | obsolete, | |
35 | scmutil, |
|
35 | scmutil, | |
36 | util, |
|
36 | util, | |
37 | ) |
|
37 | ) | |
38 |
|
38 | |||
39 | from .utils import ( |
|
39 | from .utils import ( | |
40 | repoviewutil, |
|
40 | repoviewutil, | |
41 | stringutil, |
|
41 | stringutil, | |
42 | ) |
|
42 | ) | |
43 |
|
43 | |||
44 | if TYPE_CHECKING: |
|
44 | if TYPE_CHECKING: | |
45 | from . import localrepo |
|
45 | from . import localrepo | |
46 |
|
46 | |||
47 | assert [localrepo] |
|
47 | assert [localrepo] | |
48 |
|
48 | |||
49 | subsettable = repoviewutil.subsettable |
|
49 | subsettable = repoviewutil.subsettable | |
50 |
|
50 | |||
51 | calcsize = struct.calcsize |
|
51 | calcsize = struct.calcsize | |
52 | pack_into = struct.pack_into |
|
52 | pack_into = struct.pack_into | |
53 | unpack_from = struct.unpack_from |
|
53 | unpack_from = struct.unpack_from | |
54 |
|
54 | |||
55 |
|
55 | |||
56 | class BranchMapCache: |
|
56 | class BranchMapCache: | |
57 | """mapping of filtered views of repo with their branchcache""" |
|
57 | """mapping of filtered views of repo with their branchcache""" | |
58 |
|
58 | |||
59 | def __init__(self): |
|
59 | def __init__(self): | |
60 | self._per_filter = {} |
|
60 | self._per_filter = {} | |
61 |
|
61 | |||
62 | def __getitem__(self, repo): |
|
62 | def __getitem__(self, repo): | |
63 | self.updatecache(repo) |
|
63 | self.updatecache(repo) | |
64 | bcache = self._per_filter[repo.filtername] |
|
64 | bcache = self._per_filter[repo.filtername] | |
65 | assert bcache._filtername == repo.filtername, ( |
|
65 | assert bcache._filtername == repo.filtername, ( | |
66 | bcache._filtername, |
|
66 | bcache._filtername, | |
67 | repo.filtername, |
|
67 | repo.filtername, | |
68 | ) |
|
68 | ) | |
69 | return bcache |
|
69 | return bcache | |
70 |
|
70 | |||
71 | def update_disk(self, repo): |
|
71 | def update_disk(self, repo): | |
72 | """ensure and up-to-date cache is (or will be) written on disk |
|
72 | """ensure and up-to-date cache is (or will be) written on disk | |
73 |
|
73 | |||
74 | The cache for this repository view is updated if needed and written on |
|
74 | The cache for this repository view is updated if needed and written on | |
75 | disk. |
|
75 | disk. | |
76 |
|
76 | |||
77 | If a transaction is in progress, the writing is scheduled at transaction |

77 | If a transaction is in progress, the writing is scheduled at transaction | |
78 | close. See the `BranchMapCache.write_dirty` method. |
|
78 | close. See the `BranchMapCache.write_dirty` method. | |
79 |
|
79 | |||
80 | This method exists independently of __getitem__ as it is sometimes useful |

80 | This method exists independently of __getitem__ as it is sometimes useful | |
81 | to signal that we have no intent to use the data in memory yet. |

81 | to signal that we have no intent to use the data in memory yet. | |
82 | """ |
|
82 | """ | |
83 | self.updatecache(repo) |
|
83 | self.updatecache(repo) | |
84 | bcache = self._per_filter[repo.filtername] |
|
84 | bcache = self._per_filter[repo.filtername] | |
85 | assert bcache._filtername == repo.filtername, ( |
|
85 | assert bcache._filtername == repo.filtername, ( | |
86 | bcache._filtername, |
|
86 | bcache._filtername, | |
87 | repo.filtername, |
|
87 | repo.filtername, | |
88 | ) |
|
88 | ) | |
89 | tr = repo.currenttransaction() |
|
89 | tr = repo.currenttransaction() | |
90 | if getattr(tr, 'finalized', True): |
|
90 | if getattr(tr, 'finalized', True): | |
91 | bcache.sync_disk(repo) |
|
91 | bcache.sync_disk(repo) | |
92 |
|
92 | |||
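A minimal standalone illustration (not part of this module) of the `getattr(tr, 'finalized', True)` test used in `update_disk` above: when there is no transaction at all, `getattr(None, ...)` falls back to True and the cache is synced immediately, while an open, unfinalized transaction yields False and the write is deferred to transaction close via `write_dirty`.

    # hypothetical stand-in for a transaction object that is still open
    class FakeTransaction:
        finalized = False

    print(getattr(None, 'finalized', True))               # True  -> no transaction, sync now
    print(getattr(FakeTransaction(), 'finalized', True))  # False -> open transaction, defer the write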
93 | def updatecache(self, repo): |
|
93 | def updatecache(self, repo): | |
94 | """Update the cache for the given filtered view on a repository""" |
|
94 | """Update the cache for the given filtered view on a repository""" | |
95 | # This can trigger updates for the caches for subsets of the filtered |
|
95 | # This can trigger updates for the caches for subsets of the filtered | |
96 | # view, e.g. when there is no cache for this filtered view or the cache |
|
96 | # view, e.g. when there is no cache for this filtered view or the cache | |
97 | # is stale. |
|
97 | # is stale. | |
98 |
|
98 | |||
99 | cl = repo.changelog |
|
99 | cl = repo.changelog | |
100 | filtername = repo.filtername |
|
100 | filtername = repo.filtername | |
101 | bcache = self._per_filter.get(filtername) |
|
101 | bcache = self._per_filter.get(filtername) | |
102 | if bcache is None or not bcache.validfor(repo): |
|
102 | if bcache is None or not bcache.validfor(repo): | |
103 | # cache object missing or cache object stale? Read from disk |
|
103 | # cache object missing or cache object stale? Read from disk | |
104 | bcache = branch_cache_from_file(repo) |
|
104 | bcache = branch_cache_from_file(repo) | |
105 |
|
105 | |||
106 | revs = [] |
|
106 | revs = [] | |
107 | if bcache is None: |
|
107 | if bcache is None: | |
108 | # no (fresh) cache available anymore, perhaps we can re-use |
|
108 | # no (fresh) cache available anymore, perhaps we can re-use | |
109 | # the cache for a subset, then extend that to add info on missing |
|
109 | # the cache for a subset, then extend that to add info on missing | |
110 | # revisions. |
|
110 | # revisions. | |
111 | subsetname = subsettable.get(filtername) |
|
111 | subsetname = subsettable.get(filtername) | |
112 | if subsetname is not None: |
|
112 | if subsetname is not None: | |
113 | subset = repo.filtered(subsetname) |
|
113 | subset = repo.filtered(subsetname) | |
114 | self.updatecache(subset) |
|
114 | self.updatecache(subset) | |
115 | bcache = self._per_filter[subset.filtername].inherit_for(repo) |
|
115 | bcache = self._per_filter[subset.filtername].inherit_for(repo) | |
116 | extrarevs = subset.changelog.filteredrevs - cl.filteredrevs |
|
116 | extrarevs = subset.changelog.filteredrevs - cl.filteredrevs | |
117 | revs.extend(r for r in extrarevs if r <= bcache.tiprev) |
|
117 | revs.extend(r for r in extrarevs if r <= bcache.tiprev) | |
118 | else: |
|
118 | else: | |
119 | # nothing to fall back on, start empty. |
|
119 | # nothing to fall back on, start empty. | |
120 | bcache = new_branch_cache(repo) |
|
120 | bcache = new_branch_cache(repo) | |
121 |
|
121 | |||
122 | revs.extend(cl.revs(start=bcache.tiprev + 1)) |
|
122 | revs.extend(cl.revs(start=bcache.tiprev + 1)) | |
123 | if revs: |
|
123 | if revs: | |
124 | bcache.update(repo, revs) |
|
124 | bcache.update(repo, revs) | |
125 |
|
125 | |||
126 | assert bcache.validfor(repo), filtername |
|
126 | assert bcache.validfor(repo), filtername | |
127 | self._per_filter[repo.filtername] = bcache |
|
127 | self._per_filter[repo.filtername] = bcache | |
128 |
|
128 | |||
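To make the subset fallback in `updatecache` concrete, here is a small standalone sketch. The mapping below is only an illustration of `repoviewutil.subsettable` (each filter level points to a stricter filter whose revision set is a subset), and `fallback_chain` is a hypothetical helper, not part of this module: when no valid cache exists for a level, the cache of its subset is warmed first, inherited, and then extended with the extra revisions.

    # illustrative stand-in for repoviewutil.subsettable: filter name -> stricter subset filter
    subsettable = {None: b'visible', b'visible': b'served',
                   b'served': b'immutable', b'immutable': b'base'}

    def fallback_chain(filtername):
        """Filter levels consulted, in order, when warming the cache for `filtername`."""
        chain = [filtername]
        while filtername in subsettable:
            filtername = subsettable[filtername]
            chain.append(filtername)
        return chain

    print(fallback_chain(None))  # [None, b'visible', b'served', b'immutable', b'base']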
129 | def replace(self, repo, remotebranchmap): |
|
129 | def replace(self, repo, remotebranchmap): | |
130 | """Replace the branchmap cache for a repo with a branch mapping. |
|
130 | """Replace the branchmap cache for a repo with a branch mapping. | |
131 |
|
131 | |||
132 | This is likely only called during clone with a branch map from a |
|
132 | This is likely only called during clone with a branch map from a | |
133 | remote. |
|
133 | remote. | |
134 |
|
134 | |||
135 | """ |
|
135 | """ | |
136 | cl = repo.changelog |
|
136 | cl = repo.changelog | |
137 | clrev = cl.rev |
|
137 | clrev = cl.rev | |
138 | clbranchinfo = cl.branchinfo |
|
138 | clbranchinfo = cl.branchinfo | |
139 | rbheads = [] |
|
139 | rbheads = [] | |
140 | closed = set() |
|
140 | closed = set() | |
141 | for bheads in remotebranchmap.values(): |
|
141 | for bheads in remotebranchmap.values(): | |
142 | rbheads += bheads |
|
142 | rbheads += bheads | |
143 | for h in bheads: |
|
143 | for h in bheads: | |
144 | r = clrev(h) |
|
144 | r = clrev(h) | |
145 | b, c = clbranchinfo(r) |
|
145 | b, c = clbranchinfo(r) | |
146 | if c: |
|
146 | if c: | |
147 | closed.add(h) |
|
147 | closed.add(h) | |
148 |
|
148 | |||
149 | if rbheads: |
|
149 | if rbheads: | |
150 | rtiprev = max((int(clrev(node)) for node in rbheads)) |
|
150 | rtiprev = max((int(clrev(node)) for node in rbheads)) | |
151 | cache = new_branch_cache( |
|
151 | cache = new_branch_cache( | |
152 | repo, |
|
152 | repo, | |
153 | remotebranchmap, |
|
153 | remotebranchmap, | |
154 | repo[rtiprev].node(), |
|
154 | repo[rtiprev].node(), | |
155 | rtiprev, |
|
155 | rtiprev, | |
156 | closednodes=closed, |
|
156 | closednodes=closed, | |
157 | ) |
|
157 | ) | |
158 |
|
158 | |||
159 | # Try to stick it as low as possible |
|
159 | # Try to stick it as low as possible | |
160 | # filters above served are unlikely to be fetched from a clone |

160 | # filters above served are unlikely to be fetched from a clone | |
161 | for candidate in (b'base', b'immutable', b'served'): |
|
161 | for candidate in (b'base', b'immutable', b'served'): | |
162 | rview = repo.filtered(candidate) |
|
162 | rview = repo.filtered(candidate) | |
163 | if cache.validfor(rview): |
|
163 | if cache.validfor(rview): | |
164 | cache._filtername = candidate |
|
164 | cache._filtername = candidate | |
165 | self._per_filter[candidate] = cache |
|
165 | self._per_filter[candidate] = cache | |
166 | cache._state = STATE_DIRTY |
|
166 | cache._state = STATE_DIRTY | |
167 | cache.write(rview) |
|
167 | cache.write(rview) | |
168 | return |
|
168 | return | |
169 |
|
169 | |||
170 | def clear(self): |
|
170 | def clear(self): | |
171 | self._per_filter.clear() |
|
171 | self._per_filter.clear() | |
172 |
|
172 | |||
173 | def write_dirty(self, repo): |
|
173 | def write_dirty(self, repo): | |
174 | unfi = repo.unfiltered() |
|
174 | unfi = repo.unfiltered() | |
175 | for filtername in repoviewutil.get_ordered_subset(): |
|
175 | for filtername in repoviewutil.get_ordered_subset(): | |
176 | cache = self._per_filter.get(filtername) |
|
176 | cache = self._per_filter.get(filtername) | |
177 | if cache is None: |
|
177 | if cache is None: | |
178 | continue |
|
178 | continue | |
179 | if filtername is None: |
|
179 | if filtername is None: | |
180 | repo = unfi |
|
180 | repo = unfi | |
181 | else: |
|
181 | else: | |
182 | repo = unfi.filtered(filtername) |
|
182 | repo = unfi.filtered(filtername) | |
183 | cache.sync_disk(repo) |
|
183 | cache.sync_disk(repo) | |
184 |
|
184 | |||
185 |
|
185 | |||
186 | def _unknownnode(node): |
|
186 | def _unknownnode(node): | |
187 | """raises ValueError when branchcache found a node which does not exists""" |
|
187 | """raises ValueError when branchcache found a node which does not exists""" | |
188 | raise ValueError('node %s does not exist' % node.hex()) |
|
188 | raise ValueError('node %s does not exist' % node.hex()) | |
189 |
|
189 | |||
190 |
|
190 | |||
191 | def _branchcachedesc(repo): |
|
191 | def _branchcachedesc(repo): | |
192 | if repo.filtername is not None: |
|
192 | if repo.filtername is not None: | |
193 | return b'branch cache (%s)' % repo.filtername |
|
193 | return b'branch cache (%s)' % repo.filtername | |
194 | else: |
|
194 | else: | |
195 | return b'branch cache' |
|
195 | return b'branch cache' | |
196 |
|
196 | |||
197 |
|
197 | |||
198 | class _BaseBranchCache: |
|
198 | class _BaseBranchCache: | |
199 | """A dict like object that hold branches heads cache. |
|
199 | """A dict like object that hold branches heads cache. | |
200 |
|
200 | |||
201 | This cache is used to avoid costly computations to determine all the |
|
201 | This cache is used to avoid costly computations to determine all the | |
202 | branch heads of a repo. |
|
202 | branch heads of a repo. | |
203 | """ |
|
203 | """ | |
204 |
|
204 | |||
205 | def __init__( |
|
205 | def __init__( | |
206 | self, |
|
206 | self, | |
207 | repo: "localrepo.localrepository", |
|
207 | repo: "localrepo.localrepository", | |
208 | entries: Union[ |
|
208 | entries: Union[ | |
209 | Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]] |
|
209 | Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]] | |
210 | ] = (), |
|
210 | ] = (), | |
211 | closed_nodes: Optional[Set[bytes]] = None, |
|
211 | closed_nodes: Optional[Set[bytes]] = None, | |
212 | ) -> None: |
|
212 | ) -> None: | |
213 | """hasnode is a function which can be used to verify whether changelog |
|
213 | """hasnode is a function which can be used to verify whether changelog | |
214 | has a given node or not. If it's not provided, we assume that every node |
|
214 | has a given node or not. If it's not provided, we assume that every node | |
215 | we have exists in changelog""" |
|
215 | we have exists in changelog""" | |
216 | # closednodes is a set of nodes that close their branch. If the branch |
|
216 | # closednodes is a set of nodes that close their branch. If the branch | |
217 | # cache has been updated, it may contain nodes that are no longer |
|
217 | # cache has been updated, it may contain nodes that are no longer | |
218 | # heads. |
|
218 | # heads. | |
219 | if closed_nodes is None: |
|
219 | if closed_nodes is None: | |
220 | closed_nodes = set() |
|
220 | closed_nodes = set() | |
221 | self._closednodes = set(closed_nodes) |
|
221 | self._closednodes = set(closed_nodes) | |
222 | self._entries = dict(entries) |
|
222 | self._entries = dict(entries) | |
223 |
|
223 | |||
224 | def __iter__(self): |
|
224 | def __iter__(self): | |
225 | return iter(self._entries) |
|
225 | return iter(self._entries) | |
226 |
|
226 | |||
227 | def __setitem__(self, key, value): |
|
227 | def __setitem__(self, key, value): | |
228 | self._entries[key] = value |
|
228 | self._entries[key] = value | |
229 |
|
229 | |||
230 | def __getitem__(self, key): |
|
230 | def __getitem__(self, key): | |
231 | return self._entries[key] |
|
231 | return self._entries[key] | |
232 |
|
232 | |||
233 | def __contains__(self, key): |
|
233 | def __contains__(self, key): | |
234 | return key in self._entries |
|
234 | return key in self._entries | |
235 |
|
235 | |||
236 | def iteritems(self): |
|
236 | def iteritems(self): | |
237 | return self._entries.items() |
|
237 | return self._entries.items() | |
238 |
|
238 | |||
239 | items = iteritems |
|
239 | items = iteritems | |
240 |
|
240 | |||
241 | def hasbranch(self, label): |
|
241 | def hasbranch(self, label): | |
242 | """checks whether a branch of this name exists or not""" |
|
242 | """checks whether a branch of this name exists or not""" | |
243 | return label in self._entries |
|
243 | return label in self._entries | |
244 |
|
244 | |||
245 | def _branchtip(self, heads): |
|
245 | def _branchtip(self, heads): | |
246 | """Return tuple with last open head in heads and false, |
|
246 | """Return tuple with last open head in heads and false, | |
247 | otherwise return last closed head and true.""" |
|
247 | otherwise return last closed head and true.""" | |
248 | tip = heads[-1] |
|
248 | tip = heads[-1] | |
249 | closed = True |
|
249 | closed = True | |
250 | for h in reversed(heads): |
|
250 | for h in reversed(heads): | |
251 | if h not in self._closednodes: |
|
251 | if h not in self._closednodes: | |
252 | tip = h |
|
252 | tip = h | |
253 | closed = False |
|
253 | closed = False | |
254 | break |
|
254 | break | |
255 | return tip, closed |
|
255 | return tip, closed | |
256 |
|
256 | |||
257 | def branchtip(self, branch): |
|
257 | def branchtip(self, branch): | |
258 | """Return the tipmost open head on branch head, otherwise return the |
|
258 | """Return the tipmost open head on branch head, otherwise return the | |
259 | tipmost closed head on branch. |
|
259 | tipmost closed head on branch. | |
260 | Raise KeyError for unknown branch.""" |
|
260 | Raise KeyError for unknown branch.""" | |
261 | return self._branchtip(self[branch])[0] |
|
261 | return self._branchtip(self[branch])[0] | |
262 |
|
262 | |||
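The tip-selection rule documented in `_branchtip` and `branchtip` above can be shown as a self-contained sketch (mirroring the logic of `_branchtip`, with plain values instead of repo nodes): walk the heads from newest to oldest and return the first open one; if every head is closed, fall back to the newest (closed) head.

    def branchtip(heads, closednodes):
        # heads are ordered oldest to newest; closednodes is the set of closing heads
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in closednodes:
                tip, closed = h, False
                break
        return tip, closed

    # the newest head b'c' is closed, so the open head b'b' wins:
    print(branchtip([b'a', b'b', b'c'], closednodes={b'c'}))  # (b'b', False)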
263 | def iteropen(self, nodes): |
|
263 | def iteropen(self, nodes): | |
264 | return (n for n in nodes if n not in self._closednodes) |
|
264 | return (n for n in nodes if n not in self._closednodes) | |
265 |
|
265 | |||
266 | def branchheads(self, branch, closed=False): |
|
266 | def branchheads(self, branch, closed=False): | |
267 | heads = self._entries[branch] |
|
267 | heads = self._entries[branch] | |
268 | if not closed: |
|
268 | if not closed: | |
269 | heads = list(self.iteropen(heads)) |
|
269 | heads = list(self.iteropen(heads)) | |
270 | return heads |
|
270 | return heads | |
271 |
|
271 | |||
272 | def iterbranches(self): |
|
272 | def iterbranches(self): | |
273 | for bn, heads in self.items(): |
|
273 | for bn, heads in self.items(): | |
274 | yield (bn, heads) + self._branchtip(heads) |
|
274 | yield (bn, heads) + self._branchtip(heads) | |
275 |
|
275 | |||
276 | def iterheads(self): |
|
276 | def iterheads(self): | |
277 | """returns all the heads""" |
|
277 | """returns all the heads""" | |
278 | return self._entries.values() |
|
278 | return self._entries.values() | |
279 |
|
279 | |||
280 | def update(self, repo, revgen): |
|
280 | def update(self, repo, revgen): | |
281 | """Given a branchhead cache, self, that may have extra nodes or be |
|
281 | """Given a branchhead cache, self, that may have extra nodes or be | |
282 | missing heads, and a generator of revisions that is strictly a superset of |

282 | missing heads, and a generator of revisions that is strictly a superset of | |
283 | the missing heads, this function updates self to be correct. |

283 | the missing heads, this function updates self to be correct. | |
284 | """ |
|
284 | """ | |
285 | starttime = util.timer() |
|
285 | starttime = util.timer() | |
286 | cl = repo.changelog |
|
286 | cl = repo.changelog | |
287 | # Faster than using ctx.obsolete() |
|
287 | # Faster than using ctx.obsolete() | |
288 | obsrevs = obsolete.getrevs(repo, b'obsolete') |
|
288 | obsrevs = obsolete.getrevs(repo, b'obsolete') | |
289 | # collect new branch entries |
|
289 | # collect new branch entries | |
290 | newbranches = {} |
|
290 | newbranches = {} | |
|
291 | new_closed = set() | |||
291 | obs_ignored = set() |
|
292 | obs_ignored = set() | |
292 | getbranchinfo = repo.revbranchcache().branchinfo |
|
293 | getbranchinfo = repo.revbranchcache().branchinfo | |
293 | max_rev = -1 |
|
294 | max_rev = -1 | |
294 | for r in revgen: |
|
295 | for r in revgen: | |
295 | max_rev = max(max_rev, r) |
|
296 | max_rev = max(max_rev, r) | |
296 | if r in obsrevs: |
|
297 | if r in obsrevs: | |
297 | # We ignore obsolete changesets as they shouldn't be |
|
298 | # We ignore obsolete changesets as they shouldn't be | |
298 | # considered heads. |
|
299 | # considered heads. | |
299 | obs_ignored.add(r) |
|
300 | obs_ignored.add(r) | |
300 | continue |
|
301 | continue | |
301 | branch, closesbranch = getbranchinfo(r) |
|
302 | branch, closesbranch = getbranchinfo(r) | |
302 | newbranches.setdefault(branch, []).append(r) |
|
303 | newbranches.setdefault(branch, []).append(r) | |
303 | if closesbranch: |
|
304 | if closesbranch: | |
304 | self._closednodes.add(cl.node(r)) |
|
|
305 | new_closed.add(r) | |
305 | if max_rev < 0: |
|
306 | if max_rev < 0: | |
306 | msg = "running branchcache.update without revision to update" |
|
307 | msg = "running branchcache.update without revision to update" | |
307 | raise error.ProgrammingError(msg) |
|
308 | raise error.ProgrammingError(msg) | |
308 |
|
309 | |||
309 | # Delay fetching the topological heads until they are needed. |
|
310 | # Delay fetching the topological heads until they are needed. | |
310 | # A repository without non-contiguous branches can skip this part. |

311 | # A repository without non-contiguous branches can skip this part. | |
311 | topoheads = None |
|
312 | topoheads = None | |
312 |
|
313 | |||
313 | # If a changeset is visible, its parents must be visible too, so |
|
314 | # If a changeset is visible, its parents must be visible too, so | |
314 | # use the faster unfiltered parent accessor. |
|
315 | # use the faster unfiltered parent accessor. | |
315 | parentrevs = repo.unfiltered().changelog.parentrevs |
|
316 | parentrevs = repo.unfiltered().changelog.parentrevs | |
316 |
|
317 | |||
317 | for branch, newheadrevs in newbranches.items(): |
|
318 | for branch, newheadrevs in newbranches.items(): | |
318 | # For every branch, compute the new branchheads. |
|
319 | # For every branch, compute the new branchheads. | |
319 | # A branchhead is a revision such that no descendant is on |
|
320 | # A branchhead is a revision such that no descendant is on | |
320 | # the same branch. |
|
321 | # the same branch. | |
321 | # |
|
322 | # | |
322 | # The branchheads are computed iteratively in revision order. |
|
323 | # The branchheads are computed iteratively in revision order. | |
323 | # This ensures topological order, i.e. parents are processed |
|
324 | # This ensures topological order, i.e. parents are processed | |
324 | # before their children. Ancestors are inclusive here, i.e. |
|
325 | # before their children. Ancestors are inclusive here, i.e. | |
325 | # any revision is an ancestor of itself. |
|
326 | # any revision is an ancestor of itself. | |
326 | # |
|
327 | # | |
327 | # Core observations: |
|
328 | # Core observations: | |
328 | # - The current revision is always a branchhead for the |
|
329 | # - The current revision is always a branchhead for the | |
329 | # repository up to that point. |
|
330 | # repository up to that point. | |
330 | # - It is the first revision of the branch if and only if |
|
331 | # - It is the first revision of the branch if and only if | |
331 | # there was no branchhead before. In that case, it is the |
|
332 | # there was no branchhead before. In that case, it is the | |
332 | # only branchhead as there are no possible ancestors on |
|
333 | # only branchhead as there are no possible ancestors on | |
333 | # the same branch. |
|
334 | # the same branch. | |
334 | # - If a parent is on the same branch, a branchhead can |
|
335 | # - If a parent is on the same branch, a branchhead can | |
335 | # only be an ancestor of that parent, if it is parent |
|
336 | # only be an ancestor of that parent, if it is parent | |
336 | # itself. Otherwise it would have been removed as ancestor |
|
337 | # itself. Otherwise it would have been removed as ancestor | |
337 | # of that parent before. |
|
338 | # of that parent before. | |
338 | # - Therefore, if all parents are on the same branch, they |
|
339 | # - Therefore, if all parents are on the same branch, they | |
339 | # can just be removed from the branchhead set. |
|
340 | # can just be removed from the branchhead set. | |
340 | # - If one parent is on the same branch and the other is not |
|
341 | # - If one parent is on the same branch and the other is not | |
341 | # and there was exactly one branchhead known, the existing |
|
342 | # and there was exactly one branchhead known, the existing | |
342 | # branchhead can only be an ancestor if it is the parent. |
|
343 | # branchhead can only be an ancestor if it is the parent. | |
343 | # Otherwise it would have been removed as ancestor of |
|
344 | # Otherwise it would have been removed as ancestor of | |
344 | # the parent before. The other parent therefore can't have |
|
345 | # the parent before. The other parent therefore can't have | |
345 | # a branchhead as ancestor. |
|
346 | # a branchhead as ancestor. | |
346 | # - In all other cases, the parents on different branches |
|
347 | # - In all other cases, the parents on different branches | |
347 | # could have a branchhead as ancestor. Those parents are |
|
348 | # could have a branchhead as ancestor. Those parents are | |
348 | # kept in the "uncertain" set. If all branchheads are also |
|
349 | # kept in the "uncertain" set. If all branchheads are also | |
349 | # topological heads, they can't have descendants and further |
|
350 | # topological heads, they can't have descendants and further | |
350 | # checks can be skipped. Otherwise, the ancestors of the |
|
351 | # checks can be skipped. Otherwise, the ancestors of the | |
351 | # "uncertain" set are removed from branchheads. |
|
352 | # "uncertain" set are removed from branchheads. | |
352 | # This computation is heavy and avoided if at all possible. |
|
353 | # This computation is heavy and avoided if at all possible. | |
353 | bheads = self._entries.get(branch, []) |
|
354 | bheads = self._entries.get(branch, []) | |
354 | bheadset = {cl.rev(node) for node in bheads} |
|
355 | bheadset = {cl.rev(node) for node in bheads} | |
355 | uncertain = set() |
|
356 | uncertain = set() | |
356 | for newrev in sorted(newheadrevs): |
|
357 | for newrev in sorted(newheadrevs): | |
357 | if not bheadset: |
|
358 | if not bheadset: | |
358 | bheadset.add(newrev) |
|
359 | bheadset.add(newrev) | |
359 | continue |
|
360 | continue | |
360 |
|
361 | |||
361 | parents = [p for p in parentrevs(newrev) if p != nullrev] |
|
362 | parents = [p for p in parentrevs(newrev) if p != nullrev] | |
362 | samebranch = set() |
|
363 | samebranch = set() | |
363 | otherbranch = set() |
|
364 | otherbranch = set() | |
364 | obsparents = set() |
|
365 | obsparents = set() | |
365 | for p in parents: |
|
366 | for p in parents: | |
366 | if p in obsrevs: |
|
367 | if p in obsrevs: | |
367 | # We ignored this obsolete changeset earlier, but now |
|
368 | # We ignored this obsolete changeset earlier, but now | |
368 | # that it has non-ignored children, we need to make |
|
369 | # that it has non-ignored children, we need to make | |
369 | # sure their ancestors are not considered heads. To |
|
370 | # sure their ancestors are not considered heads. To | |
370 | # achieve that, we will simply treat this obsolete |
|
371 | # achieve that, we will simply treat this obsolete | |
371 | # changeset as a parent from other branch. |
|
372 | # changeset as a parent from other branch. | |
372 | obsparents.add(p) |
|
373 | obsparents.add(p) | |
373 | elif p in bheadset or getbranchinfo(p)[0] == branch: |
|
374 | elif p in bheadset or getbranchinfo(p)[0] == branch: | |
374 | samebranch.add(p) |
|
375 | samebranch.add(p) | |
375 | else: |
|
376 | else: | |
376 | otherbranch.add(p) |
|
377 | otherbranch.add(p) | |
377 | if not (len(bheadset) == len(samebranch) == 1): |
|
378 | if not (len(bheadset) == len(samebranch) == 1): | |
378 | uncertain.update(otherbranch) |
|
379 | uncertain.update(otherbranch) | |
379 | uncertain.update(obsparents) |
|
380 | uncertain.update(obsparents) | |
380 | bheadset.difference_update(samebranch) |
|
381 | bheadset.difference_update(samebranch) | |
381 | bheadset.add(newrev) |
|
382 | bheadset.add(newrev) | |
382 |
|
383 | |||
383 | if uncertain: |
|
384 | if uncertain: | |
384 | if topoheads is None: |
|
385 | if topoheads is None: | |
385 | topoheads = set(cl.headrevs()) |
|
386 | topoheads = set(cl.headrevs()) | |
386 | if bheadset - topoheads: |
|
387 | if bheadset - topoheads: | |
387 | floorrev = min(bheadset) |
|
388 | floorrev = min(bheadset) | |
388 | if floorrev <= max(uncertain): |
|
389 | if floorrev <= max(uncertain): | |
389 | ancestors = set(cl.ancestors(uncertain, floorrev)) |
|
390 | ancestors = set(cl.ancestors(uncertain, floorrev)) | |
390 | bheadset -= ancestors |
|
391 | bheadset -= ancestors | |
391 | if bheadset: |
|
392 | if bheadset: | |
392 | self[branch] = [cl.node(rev) for rev in sorted(bheadset)] |
|
393 | self[branch] = [cl.node(rev) for rev in sorted(bheadset)] | |
393 |
|
394 | |||
|
395 | self._closednodes.update(cl.node(rev) for rev in new_closed) | |||
|
396 | ||||
394 | duration = util.timer() - starttime |
|
397 | duration = util.timer() - starttime | |
395 | repo.ui.log( |
|
398 | repo.ui.log( | |
396 | b'branchcache', |
|
399 | b'branchcache', | |
397 | b'updated %s in %.4f seconds\n', |
|
400 | b'updated %s in %.4f seconds\n', | |
398 | _branchcachedesc(repo), |
|
401 | _branchcachedesc(repo), | |
399 | duration, |
|
402 | duration, | |
400 | ) |
|
403 | ) | |
401 | return max_rev |
|
404 | return max_rev | |
402 |
|
405 | |||
403 |
|
406 | |||
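The long comment inside `update` describes an incremental branch-head computation. The standalone sketch below illustrates only the core rule on a toy graph (a parent on the same branch stops being a head once its child is processed); it deliberately omits closed heads, obsolescence and the "uncertain"/ancestor-pruning step handled by the real code.

    def branch_heads(parents, branch_of):
        """parents: {rev: [parent revs]}, branch_of: {rev: branch name}."""
        heads = {}  # branch name -> set of head revs
        for rev in sorted(parents):  # numeric order means parents are seen first
            b = branch_of[rev]
            bheads = heads.setdefault(b, set())
            # a parent on the same branch can no longer be a head
            bheads.difference_update(p for p in parents[rev] if branch_of[p] == b)
            bheads.add(rev)
        return heads

    # linear history 0-1-2 on 'default' plus a branch 'stable' forked at rev 1:
    parents = {0: [], 1: [0], 2: [1], 3: [1]}
    branch_of = {0: b'default', 1: b'default', 2: b'default', 3: b'stable'}
    print(branch_heads(parents, branch_of))  # {b'default': {2}, b'stable': {3}}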
404 | STATE_CLEAN = 1 |
|
407 | STATE_CLEAN = 1 | |
405 | STATE_INHERITED = 2 |
|
408 | STATE_INHERITED = 2 | |
406 | STATE_DIRTY = 3 |
|
409 | STATE_DIRTY = 3 | |
407 |
|
410 | |||
408 |
|
411 | |||
409 | class _LocalBranchCache(_BaseBranchCache): |
|
412 | class _LocalBranchCache(_BaseBranchCache): | |
410 | """base class of branch-map info for a local repo or repoview""" |
|
413 | """base class of branch-map info for a local repo or repoview""" | |
411 |
|
414 | |||
412 | _base_filename = None |
|
415 | _base_filename = None | |
413 | _default_key_hashes: Tuple[bytes] = cast(Tuple[bytes], ()) |
|
416 | _default_key_hashes: Tuple[bytes] = cast(Tuple[bytes], ()) | |
414 |
|
417 | |||
415 | def __init__( |
|
418 | def __init__( | |
416 | self, |
|
419 | self, | |
417 | repo: "localrepo.localrepository", |
|
420 | repo: "localrepo.localrepository", | |
418 | entries: Union[ |
|
421 | entries: Union[ | |
419 | Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]] |
|
422 | Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]] | |
420 | ] = (), |
|
423 | ] = (), | |
421 | tipnode: Optional[bytes] = None, |
|
424 | tipnode: Optional[bytes] = None, | |
422 | tiprev: Optional[int] = nullrev, |
|
425 | tiprev: Optional[int] = nullrev, | |
423 | key_hashes: Optional[Tuple[bytes]] = None, |
|
426 | key_hashes: Optional[Tuple[bytes]] = None, | |
424 | closednodes: Optional[Set[bytes]] = None, |
|
427 | closednodes: Optional[Set[bytes]] = None, | |
425 | hasnode: Optional[Callable[[bytes], bool]] = None, |
|
428 | hasnode: Optional[Callable[[bytes], bool]] = None, | |
426 | verify_node: bool = False, |
|
429 | verify_node: bool = False, | |
427 | inherited: bool = False, |
|
430 | inherited: bool = False, | |
428 | ) -> None: |
|
431 | ) -> None: | |
429 | """hasnode is a function which can be used to verify whether changelog |
|
432 | """hasnode is a function which can be used to verify whether changelog | |
430 | has a given node or not. If it's not provided, we assume that every node |
|
433 | has a given node or not. If it's not provided, we assume that every node | |
431 | we have exists in changelog""" |
|
434 | we have exists in changelog""" | |
432 | self._filtername = repo.filtername |
|
435 | self._filtername = repo.filtername | |
433 | if tipnode is None: |
|
436 | if tipnode is None: | |
434 | self.tipnode = repo.nullid |
|
437 | self.tipnode = repo.nullid | |
435 | else: |
|
438 | else: | |
436 | self.tipnode = tipnode |
|
439 | self.tipnode = tipnode | |
437 | self.tiprev = tiprev |
|
440 | self.tiprev = tiprev | |
438 | if key_hashes is None: |
|
441 | if key_hashes is None: | |
439 | self.key_hashes = self._default_key_hashes |
|
442 | self.key_hashes = self._default_key_hashes | |
440 | else: |
|
443 | else: | |
441 | self.key_hashes = key_hashes |
|
444 | self.key_hashes = key_hashes | |
442 | self._state = STATE_CLEAN |
|
445 | self._state = STATE_CLEAN | |
443 | if inherited: |
|
446 | if inherited: | |
444 | self._state = STATE_INHERITED |
|
447 | self._state = STATE_INHERITED | |
445 |
|
448 | |||
446 | super().__init__(repo=repo, entries=entries, closed_nodes=closednodes) |
|
449 | super().__init__(repo=repo, entries=entries, closed_nodes=closednodes) | |
447 | # closednodes is a set of nodes that close their branch. If the branch |
|
450 | # closednodes is a set of nodes that close their branch. If the branch | |
448 | # cache has been updated, it may contain nodes that are no longer |
|
451 | # cache has been updated, it may contain nodes that are no longer | |
449 | # heads. |
|
452 | # heads. | |
450 |
|
453 | |||
451 | # Do we need to verify the branch at all? |

454 | # Do we need to verify the branch at all? | |
452 | self._verify_node = verify_node |
|
455 | self._verify_node = verify_node | |
453 | # branches for which nodes are verified |
|
456 | # branches for which nodes are verified | |
454 | self._verifiedbranches = set() |
|
457 | self._verifiedbranches = set() | |
455 | self._hasnode = None |
|
458 | self._hasnode = None | |
456 | if self._verify_node: |
|
459 | if self._verify_node: | |
457 | self._hasnode = repo.changelog.hasnode |
|
460 | self._hasnode = repo.changelog.hasnode | |
458 |
|
461 | |||
459 | def _compute_key_hashes(self, repo) -> Tuple[bytes]: |
|
462 | def _compute_key_hashes(self, repo) -> Tuple[bytes]: | |
460 | raise NotImplementedError |
|
463 | raise NotImplementedError | |
461 |
|
464 | |||
462 | def validfor(self, repo): |
|
465 | def validfor(self, repo): | |
463 | """check that cache contents are valid for (a subset of) this repo |
|
466 | """check that cache contents are valid for (a subset of) this repo | |
464 |
|
467 | |||
465 | - False when the order of changesets changed or if we detect a strip. |
|
468 | - False when the order of changesets changed or if we detect a strip. | |
466 | - True when cache is up-to-date for the current repo or its subset.""" |
|
469 | - True when cache is up-to-date for the current repo or its subset.""" | |
467 | try: |
|
470 | try: | |
468 | node = repo.changelog.node(self.tiprev) |
|
471 | node = repo.changelog.node(self.tiprev) | |
469 | except IndexError: |
|
472 | except IndexError: | |
470 | # changesets were stripped and now we don't even have enough to |
|
473 | # changesets were stripped and now we don't even have enough to | |
471 | # find tiprev |
|
474 | # find tiprev | |
472 | return False |
|
475 | return False | |
473 | if self.tipnode != node: |
|
476 | if self.tipnode != node: | |
474 | # tiprev doesn't correspond to tipnode: repo was stripped, or this |
|
477 | # tiprev doesn't correspond to tipnode: repo was stripped, or this | |
475 | # repo has a different order of changesets |
|
478 | # repo has a different order of changesets | |
476 | return False |
|
479 | return False | |
477 | repo_key_hashes = self._compute_key_hashes(repo) |
|
480 | repo_key_hashes = self._compute_key_hashes(repo) | |
478 | # hashes don't match if this repo view has a different set of filtered |
|
481 | # hashes don't match if this repo view has a different set of filtered | |
479 | # revisions (e.g. due to phase changes) or obsolete revisions (e.g. |
|
482 | # revisions (e.g. due to phase changes) or obsolete revisions (e.g. | |
480 | # history was rewritten) |
|
483 | # history was rewritten) | |
481 | return self.key_hashes == repo_key_hashes |
|
484 | return self.key_hashes == repo_key_hashes | |
482 |
|
485 | |||
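The validity rule above can be restated as a standalone predicate; the argument names are hypothetical stand-ins for the repo accessors used by `validfor`, and this is only a sketch of the check, not the module's API.

    def cache_valid(tiprev, tipnode, key_hashes, node_at, current_key_hashes):
        """node_at(rev) -> node id, raising IndexError if the revision was stripped."""
        try:
            node = node_at(tiprev)
        except IndexError:
            return False  # tiprev no longer exists: the repository was stripped
        if node != tipnode:
            return False  # stripped or reordered history: cached tip no longer matches
        return key_hashes == current_key_hashes  # filtered/obsolete key must be unchanged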
483 | @classmethod |
|
486 | @classmethod | |
484 | def fromfile(cls, repo): |
|
487 | def fromfile(cls, repo): | |
485 | f = None |
|
488 | f = None | |
486 | try: |
|
489 | try: | |
487 | f = repo.cachevfs(cls._filename(repo)) |
|
490 | f = repo.cachevfs(cls._filename(repo)) | |
488 | lineiter = iter(f) |
|
491 | lineiter = iter(f) | |
489 | init_kwargs = cls._load_header(repo, lineiter) |
|
492 | init_kwargs = cls._load_header(repo, lineiter) | |
490 | bcache = cls( |
|
493 | bcache = cls( | |
491 | repo, |
|
494 | repo, | |
492 | verify_node=True, |
|
495 | verify_node=True, | |
493 | **init_kwargs, |
|
496 | **init_kwargs, | |
494 | ) |
|
497 | ) | |
495 | if not bcache.validfor(repo): |
|
498 | if not bcache.validfor(repo): | |
496 | # invalidate the cache |
|
499 | # invalidate the cache | |
497 | raise ValueError('tip differs') |
|
500 | raise ValueError('tip differs') | |
498 | bcache._load_heads(repo, lineiter) |
|
501 | bcache._load_heads(repo, lineiter) | |
499 | except (IOError, OSError): |
|
502 | except (IOError, OSError): | |
500 | return None |
|
503 | return None | |
501 |
|
504 | |||
502 | except Exception as inst: |
|
505 | except Exception as inst: | |
503 | if repo.ui.debugflag: |
|
506 | if repo.ui.debugflag: | |
504 | msg = b'invalid %s: %s\n' |
|
507 | msg = b'invalid %s: %s\n' | |
505 | msg %= ( |
|
508 | msg %= ( | |
506 | _branchcachedesc(repo), |
|
509 | _branchcachedesc(repo), | |
507 | stringutil.forcebytestr(inst), |
|
510 | stringutil.forcebytestr(inst), | |
508 | ) |
|
511 | ) | |
509 | repo.ui.debug(msg) |
|
512 | repo.ui.debug(msg) | |
510 | bcache = None |
|
513 | bcache = None | |
511 |
|
514 | |||
512 | finally: |
|
515 | finally: | |
513 | if f: |
|
516 | if f: | |
514 | f.close() |
|
517 | f.close() | |
515 |
|
518 | |||
516 | return bcache |
|
519 | return bcache | |
517 |
|
520 | |||
518 | @classmethod |
|
521 | @classmethod | |
519 | def _load_header(cls, repo, lineiter) -> "dict[str, Any]": |
|
522 | def _load_header(cls, repo, lineiter) -> "dict[str, Any]": | |
520 | raise NotImplementedError |
|
523 | raise NotImplementedError | |
521 |
|
524 | |||
522 | def _load_heads(self, repo, lineiter): |
|
525 | def _load_heads(self, repo, lineiter): | |
523 | """fully loads the branchcache by reading from the file using the line |
|
526 | """fully loads the branchcache by reading from the file using the line | |
524 | iterator passed""" |
|
527 | iterator passed""" | |
525 | for line in lineiter: |
|
528 | for line in lineiter: | |
526 | line = line.rstrip(b'\n') |
|
529 | line = line.rstrip(b'\n') | |
527 | if not line: |
|
530 | if not line: | |
528 | continue |
|
531 | continue | |
529 | node, state, label = line.split(b" ", 2) |
|
532 | node, state, label = line.split(b" ", 2) | |
530 | if state not in b'oc': |
|
533 | if state not in b'oc': | |
531 | raise ValueError('invalid branch state') |
|
534 | raise ValueError('invalid branch state') | |
532 | label = encoding.tolocal(label.strip()) |
|
535 | label = encoding.tolocal(label.strip()) | |
533 | node = bin(node) |
|
536 | node = bin(node) | |
534 | self._entries.setdefault(label, []).append(node) |
|
537 | self._entries.setdefault(label, []).append(node) | |
535 | if state == b'c': |
|
538 | if state == b'c': | |
536 | self._closednodes.add(node) |
|
539 | self._closednodes.add(node) | |
537 |
|
540 | |||
538 | @classmethod |
|
541 | @classmethod | |
539 | def _filename(cls, repo): |
|
542 | def _filename(cls, repo): | |
540 | """name of a branchcache file for a given repo or repoview""" |
|
543 | """name of a branchcache file for a given repo or repoview""" | |
541 | filename = cls._base_filename |
|
544 | filename = cls._base_filename | |
542 | assert filename is not None |
|
545 | assert filename is not None | |
543 | if repo.filtername: |
|
546 | if repo.filtername: | |
544 | filename = b'%s-%s' % (filename, repo.filtername) |
|
547 | filename = b'%s-%s' % (filename, repo.filtername) | |
545 | return filename |
|
548 | return filename | |
546 |
|
549 | |||
547 | def inherit_for(self, repo): |
|
550 | def inherit_for(self, repo): | |
548 | """return a deep copy of the branchcache object""" |
|
551 | """return a deep copy of the branchcache object""" | |
549 | assert repo.filtername != self._filtername |
|
552 | assert repo.filtername != self._filtername | |
550 | other = type(self)( |
|
553 | other = type(self)( | |
551 | repo=repo, |
|
554 | repo=repo, | |
552 | # we always do a shallow copy of self._entries, and the values are |

555 | # we always do a shallow copy of self._entries, and the values are | |
553 | # always replaced, so no need to deepcopy as long as the above remains |

556 | # always replaced, so no need to deepcopy as long as the above remains | |
554 | # true. |
|
557 | # true. | |
555 | entries=self._entries, |
|
558 | entries=self._entries, | |
556 | tipnode=self.tipnode, |
|
559 | tipnode=self.tipnode, | |
557 | tiprev=self.tiprev, |
|
560 | tiprev=self.tiprev, | |
558 | key_hashes=self.key_hashes, |
|
561 | key_hashes=self.key_hashes, | |
559 | closednodes=set(self._closednodes), |
|
562 | closednodes=set(self._closednodes), | |
560 | verify_node=self._verify_node, |
|
563 | verify_node=self._verify_node, | |
561 | inherited=True, |
|
564 | inherited=True, | |
562 | ) |
|
565 | ) | |
563 | # also copy information about the current verification state |
|
566 | # also copy information about the current verification state | |
564 | other._verifiedbranches = set(self._verifiedbranches) |
|
567 | other._verifiedbranches = set(self._verifiedbranches) | |
565 | return other |
|
568 | return other | |
566 |
|
569 | |||
567 | def sync_disk(self, repo): |
|
570 | def sync_disk(self, repo): | |
568 | """synchronise the on disk file with the cache state |
|
571 | """synchronise the on disk file with the cache state | |
569 |
|
572 | |||
570 | If new values specific to this filter level need to be written, the file |

573 | If new values specific to this filter level need to be written, the file | |
571 | will be updated; if the state of the branchcache is inherited from a |

574 | will be updated; if the state of the branchcache is inherited from a | |
572 | subset, any stale on-disk file will be deleted. |

575 | subset, any stale on-disk file will be deleted. | |
573 |
|
576 | |||
574 | That method does nothing if there is nothing to do. |
|
577 | That method does nothing if there is nothing to do. | |
575 | """ |
|
578 | """ | |
576 | if self._state == STATE_DIRTY: |
|
579 | if self._state == STATE_DIRTY: | |
577 | self.write(repo) |
|
580 | self.write(repo) | |
578 | elif self._state == STATE_INHERITED: |
|
581 | elif self._state == STATE_INHERITED: | |
579 | filename = self._filename(repo) |
|
582 | filename = self._filename(repo) | |
580 | repo.cachevfs.tryunlink(filename) |
|
583 | repo.cachevfs.tryunlink(filename) | |
581 |
|
584 | |||
582 | def write(self, repo): |
|
585 | def write(self, repo): | |
583 | assert self._filtername == repo.filtername, ( |
|
586 | assert self._filtername == repo.filtername, ( | |
584 | self._filtername, |
|
587 | self._filtername, | |
585 | repo.filtername, |
|
588 | repo.filtername, | |
586 | ) |
|
589 | ) | |
587 | assert self._state == STATE_DIRTY, self._state |
|
590 | assert self._state == STATE_DIRTY, self._state | |
588 | # This method should not be called during an open transaction |
|
591 | # This method should not be called during an open transaction | |
589 | tr = repo.currenttransaction() |
|
592 | tr = repo.currenttransaction() | |
590 | if not getattr(tr, 'finalized', True): |
|
593 | if not getattr(tr, 'finalized', True): | |
591 | msg = "writing branchcache in the middle of a transaction" |
|
594 | msg = "writing branchcache in the middle of a transaction" | |
592 | raise error.ProgrammingError(msg) |
|
595 | raise error.ProgrammingError(msg) | |
593 | try: |
|
596 | try: | |
594 | filename = self._filename(repo) |
|
597 | filename = self._filename(repo) | |
595 | with repo.cachevfs(filename, b"w", atomictemp=True) as f: |
|
598 | with repo.cachevfs(filename, b"w", atomictemp=True) as f: | |
596 | self._write_header(f) |
|
599 | self._write_header(f) | |
597 | nodecount = self._write_heads(repo, f) |
|
600 | nodecount = self._write_heads(repo, f) | |
598 | repo.ui.log( |
|
601 | repo.ui.log( | |
599 | b'branchcache', |
|
602 | b'branchcache', | |
600 | b'wrote %s with %d labels and %d nodes\n', |
|
603 | b'wrote %s with %d labels and %d nodes\n', | |
601 | _branchcachedesc(repo), |
|
604 | _branchcachedesc(repo), | |
602 | len(self._entries), |
|
605 | len(self._entries), | |
603 | nodecount, |
|
606 | nodecount, | |
604 | ) |
|
607 | ) | |
605 | self._state = STATE_CLEAN |
|
608 | self._state = STATE_CLEAN | |
606 | except (IOError, OSError, error.Abort) as inst: |
|
609 | except (IOError, OSError, error.Abort) as inst: | |
607 | # Abort may be raised by read only opener, so log and continue |
|
610 | # Abort may be raised by read only opener, so log and continue | |
608 | repo.ui.debug( |
|
611 | repo.ui.debug( | |
609 | b"couldn't write branch cache: %s\n" |
|
612 | b"couldn't write branch cache: %s\n" | |
610 | % stringutil.forcebytestr(inst) |
|
613 | % stringutil.forcebytestr(inst) | |
611 | ) |
|
614 | ) | |
612 |
|
615 | |||
613 | def _write_header(self, fp) -> None: |
|
616 | def _write_header(self, fp) -> None: | |
614 | raise NotImplementedError |
|
617 | raise NotImplementedError | |
615 |
|
618 | |||
616 | def _write_heads(self, repo, fp) -> int: |
|
619 | def _write_heads(self, repo, fp) -> int: | |
617 | """write list of heads to a file |
|
620 | """write list of heads to a file | |
618 |
|
621 | |||
619 | Return the number of heads written.""" |
|
622 | Return the number of heads written.""" | |
620 | nodecount = 0 |
|
623 | nodecount = 0 | |
621 | for label, nodes in sorted(self._entries.items()): |
|
624 | for label, nodes in sorted(self._entries.items()): | |
622 | label = encoding.fromlocal(label) |
|
625 | label = encoding.fromlocal(label) | |
623 | for node in nodes: |
|
626 | for node in nodes: | |
624 | nodecount += 1 |
|
627 | nodecount += 1 | |
625 | if node in self._closednodes: |
|
628 | if node in self._closednodes: | |
626 | state = b'c' |
|
629 | state = b'c' | |
627 | else: |
|
630 | else: | |
628 | state = b'o' |
|
631 | state = b'o' | |
629 | fp.write(b"%s %s %s\n" % (hex(node), state, label)) |
|
632 | fp.write(b"%s %s %s\n" % (hex(node), state, label)) | |
630 | return nodecount |
|
633 | return nodecount | |
631 |
|
634 | |||
632 | def _verifybranch(self, branch): |
|
635 | def _verifybranch(self, branch): | |
633 | """verify head nodes for the given branch.""" |
|
636 | """verify head nodes for the given branch.""" | |
634 | if not self._verify_node: |
|
637 | if not self._verify_node: | |
635 | return |
|
638 | return | |
636 | if branch not in self._entries or branch in self._verifiedbranches: |
|
639 | if branch not in self._entries or branch in self._verifiedbranches: | |
637 | return |
|
640 | return | |
638 | assert self._hasnode is not None |
|
641 | assert self._hasnode is not None | |
639 | for n in self._entries[branch]: |
|
642 | for n in self._entries[branch]: | |
640 | if not self._hasnode(n): |
|
643 | if not self._hasnode(n): | |
641 | _unknownnode(n) |
|
644 | _unknownnode(n) | |
642 |
|
645 | |||
643 | self._verifiedbranches.add(branch) |
|
646 | self._verifiedbranches.add(branch) | |
644 |
|
647 | |||
645 | def _verifyall(self): |
|
648 | def _verifyall(self): | |
646 | """verifies nodes of all the branches""" |
|
649 | """verifies nodes of all the branches""" | |
647 | for b in self._entries.keys(): |
|
650 | for b in self._entries.keys(): | |
648 | if b not in self._verifiedbranches: |
|
651 | if b not in self._verifiedbranches: | |
649 | self._verifybranch(b) |
|
652 | self._verifybranch(b) | |
650 |
|
653 | |||
651 | def __getitem__(self, key): |
|
654 | def __getitem__(self, key): | |
652 | self._verifybranch(key) |
|
655 | self._verifybranch(key) | |
653 | return super().__getitem__(key) |
|
656 | return super().__getitem__(key) | |
654 |
|
657 | |||
655 | def __contains__(self, key): |
|
658 | def __contains__(self, key): | |
656 | self._verifybranch(key) |
|
659 | self._verifybranch(key) | |
657 | return super().__contains__(key) |
|
660 | return super().__contains__(key) | |
658 |
|
661 | |||
659 | def iteritems(self): |
|
662 | def iteritems(self): | |
660 | self._verifyall() |
|
663 | self._verifyall() | |
661 | return super().iteritems() |
|
664 | return super().iteritems() | |
662 |
|
665 | |||
663 | items = iteritems |
|
666 | items = iteritems | |
664 |
|
667 | |||
665 | def iterheads(self): |
|
668 | def iterheads(self): | |
666 | """returns all the heads""" |
|
669 | """returns all the heads""" | |
667 | self._verifyall() |
|
670 | self._verifyall() | |
668 | return super().iterheads() |
|
671 | return super().iterheads() | |
669 |
|
672 | |||
670 | def hasbranch(self, label): |
|
673 | def hasbranch(self, label): | |
671 | """checks whether a branch of this name exists or not""" |
|
674 | """checks whether a branch of this name exists or not""" | |
672 | self._verifybranch(label) |
|
675 | self._verifybranch(label) | |
673 | return super().hasbranch(label) |
|
676 | return super().hasbranch(label) | |
674 |
|
677 | |||
675 | def branchheads(self, branch, closed=False): |
|
678 | def branchheads(self, branch, closed=False): | |
676 | self._verifybranch(branch) |
|
679 | self._verifybranch(branch) | |
677 | return super().branchheads(branch, closed=closed) |
|
680 | return super().branchheads(branch, closed=closed) | |
678 |
|
681 | |||
679 | def update(self, repo, revgen): |
|
682 | def update(self, repo, revgen): | |
680 | assert self._filtername == repo.filtername, ( |
|
683 | assert self._filtername == repo.filtername, ( | |
681 | self._filtername, |
|
684 | self._filtername, | |
682 | repo.filtername, |
|
685 | repo.filtername, | |
683 | ) |
|
686 | ) | |
684 | cl = repo.changelog |
|
687 | cl = repo.changelog | |
685 | max_rev = super().update(repo, revgen) |
|
688 | max_rev = super().update(repo, revgen) | |
686 | # new tip revision which we found after iterating items from new |
|
689 | # new tip revision which we found after iterating items from new | |
687 | # branches |
|
690 | # branches | |
688 | if max_rev is not None and max_rev > self.tiprev: |
|
691 | if max_rev is not None and max_rev > self.tiprev: | |
689 | self.tiprev = max_rev |
|
692 | self.tiprev = max_rev | |
690 | self.tipnode = cl.node(max_rev) |
|
693 | self.tipnode = cl.node(max_rev) | |
691 | else: |
|
694 | else: | |
692 | # We should not be here if this is false |

695 | # We should not be here if this is false | |
693 | assert cl.node(self.tiprev) == self.tipnode |
|
696 | assert cl.node(self.tiprev) == self.tipnode | |
694 |
|
697 | |||
695 | if not self.validfor(repo): |
|
698 | if not self.validfor(repo): | |
696 | # the tiprev and tipnode should be aligned, so if the current repo |
|
699 | # the tiprev and tipnode should be aligned, so if the current repo | |
697 | # is not seens as valid this is because old cache key is now |
|
700 | # is not seens as valid this is because old cache key is now | |
698 | # invalid for the repo. |
|
701 | # invalid for the repo. | |
699 | # |
|
702 | # | |
700 | # However, we've just updated the cache and we assume it's valid, |

703 | # However, we've just updated the cache and we assume it's valid, | |
701 | # so let's make the cache key valid as well by recomputing it from |
|
704 | # so let's make the cache key valid as well by recomputing it from | |
702 | # the cached data |
|
705 | # the cached data | |
703 | self.key_hashes = self._compute_key_hashes(repo) |
|
706 | self.key_hashes = self._compute_key_hashes(repo) | |
704 | self.filteredhash = scmutil.combined_filtered_and_obsolete_hash( |
|
707 | self.filteredhash = scmutil.combined_filtered_and_obsolete_hash( | |
705 | repo, |
|
708 | repo, | |
706 | self.tiprev, |
|
709 | self.tiprev, | |
707 | ) |
|
710 | ) | |
708 |
|
711 | |||
709 | self._state = STATE_DIRTY |
|
712 | self._state = STATE_DIRTY | |
710 | tr = repo.currenttransaction() |
|
713 | tr = repo.currenttransaction() | |
711 | if getattr(tr, 'finalized', True): |
|
714 | if getattr(tr, 'finalized', True): | |
712 | # Avoid premature writing. |
|
715 | # Avoid premature writing. | |
713 | # |
|
716 | # | |
714 | # (The cache warming setup by localrepo will update the file later.) |
|
717 | # (The cache warming setup by localrepo will update the file later.) | |
715 | self.write(repo) |
|
718 | self.write(repo) | |
716 |
|
719 | |||
717 |
|
720 | |||
718 | def branch_cache_from_file(repo) -> Optional[_LocalBranchCache]: |
|
721 | def branch_cache_from_file(repo) -> Optional[_LocalBranchCache]: | |
719 | """Build a branch cache from on-disk data if possible |
|
722 | """Build a branch cache from on-disk data if possible | |
720 |
|
723 | |||
721 | Return a branch cache of the right format depending on the repository. |

724 | Return a branch cache of the right format depending on the repository. | |
722 | """ |
|
725 | """ | |
723 | if repo.ui.configbool(b"experimental", b"branch-cache-v3"): |
|
726 | if repo.ui.configbool(b"experimental", b"branch-cache-v3"): | |
724 | return BranchCacheV3.fromfile(repo) |
|
727 | return BranchCacheV3.fromfile(repo) | |
725 | else: |
|
728 | else: | |
726 | return BranchCacheV2.fromfile(repo) |
|
729 | return BranchCacheV2.fromfile(repo) | |
727 |
|
730 | |||
728 |
|
731 | |||
729 | def new_branch_cache(repo, *args, **kwargs): |
|
732 | def new_branch_cache(repo, *args, **kwargs): | |
730 | """Build a new branch cache from argument |
|
733 | """Build a new branch cache from argument | |
731 |
|
734 | |||
732 | Return a branch cache of the right format depending of the repository. |
|
735 | Return a branch cache of the right format depending of the repository. | |
733 | """ |
|
736 | """ | |
734 | if repo.ui.configbool(b"experimental", b"branch-cache-v3"): |
|
737 | if repo.ui.configbool(b"experimental", b"branch-cache-v3"): | |
735 | return BranchCacheV3(repo, *args, **kwargs) |
|
738 | return BranchCacheV3(repo, *args, **kwargs) | |
736 | else: |
|
739 | else: | |
737 | return BranchCacheV2(repo, *args, **kwargs) |
|
740 | return BranchCacheV2(repo, *args, **kwargs) | |
738 |
|
741 | |||
739 |
|
742 | |||
740 | class BranchCacheV2(_LocalBranchCache): |
|
743 | class BranchCacheV2(_LocalBranchCache): | |
741 | """a branch cache using version 2 of the format on disk |
|
744 | """a branch cache using version 2 of the format on disk | |
742 |
|
745 | |||
743 | The cache is serialized on disk in the following format: |
|
746 | The cache is serialized on disk in the following format: | |
744 |
|
747 | |||
745 | <tip hex node> <tip rev number> [optional filtered repo hex hash] |
|
748 | <tip hex node> <tip rev number> [optional filtered repo hex hash] | |
746 | <branch head hex node> <open/closed state> <branch name> |
|
749 | <branch head hex node> <open/closed state> <branch name> | |
747 | <branch head hex node> <open/closed state> <branch name> |
|
750 | <branch head hex node> <open/closed state> <branch name> | |
748 | ... |
|
751 | ... | |
749 |
|
752 | |||
750 | The first line is used to check if the cache is still valid. If the |
|
753 | The first line is used to check if the cache is still valid. If the | |
751 | branch cache is for a filtered repo view, an optional third hash is |
|
754 | branch cache is for a filtered repo view, an optional third hash is | |
752 | included that hashes the hashes of all filtered and obsolete revisions. |
|
755 | included that hashes the hashes of all filtered and obsolete revisions. | |
753 |
|
756 | |||
754 | The open/closed state is represented by a single letter 'o' or 'c'. |
|
757 | The open/closed state is represented by a single letter 'o' or 'c'. | |
755 | This field can be used to avoid changelog reads when determining if a |
|
758 | This field can be used to avoid changelog reads when determining if a | |
756 | branch head closes a branch or not. |
|
759 | branch head closes a branch or not. | |
757 | """ |
|
760 | """ | |
758 |
|
761 | |||
759 | _base_filename = b"branch2" |
|
762 | _base_filename = b"branch2" | |
760 |
|
763 | |||
761 | @classmethod |
|
764 | @classmethod | |
762 | def _load_header(cls, repo, lineiter) -> "dict[str, Any]": |
|
765 | def _load_header(cls, repo, lineiter) -> "dict[str, Any]": | |
763 | """parse the head of a branchmap file |
|
766 | """parse the head of a branchmap file | |
764 |
|
767 | |||
765 | return parameters to pass to a newly created class instance. |
|
768 | return parameters to pass to a newly created class instance. | |
766 | """ |
|
769 | """ | |
767 | cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2) |
|
770 | cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2) | |
768 | last, lrev = cachekey[:2] |
|
771 | last, lrev = cachekey[:2] | |
769 | last, lrev = bin(last), int(lrev) |
|
772 | last, lrev = bin(last), int(lrev) | |
770 | filteredhash = () |
|
773 | filteredhash = () | |
771 | if len(cachekey) > 2: |
|
774 | if len(cachekey) > 2: | |
772 | filteredhash = (bin(cachekey[2]),) |
|
775 | filteredhash = (bin(cachekey[2]),) | |
773 | return { |
|
776 | return { | |
774 | "tipnode": last, |
|
777 | "tipnode": last, | |
775 | "tiprev": lrev, |
|
778 | "tiprev": lrev, | |
776 | "key_hashes": filteredhash, |
|
779 | "key_hashes": filteredhash, | |
777 | } |
|
780 | } | |
778 |
|
781 | |||
779 | def _write_header(self, fp) -> None: |
|
782 | def _write_header(self, fp) -> None: | |
780 | """write the branch cache header to a file""" |
|
783 | """write the branch cache header to a file""" | |
781 | cachekey = [hex(self.tipnode), b'%d' % self.tiprev] |
|
784 | cachekey = [hex(self.tipnode), b'%d' % self.tiprev] | |
782 | if self.key_hashes: |
|
785 | if self.key_hashes: | |
783 | cachekey.append(hex(self.key_hashes[0])) |
|
786 | cachekey.append(hex(self.key_hashes[0])) | |
784 | fp.write(b" ".join(cachekey) + b'\n') |
|
787 | fp.write(b" ".join(cachekey) + b'\n') | |
785 |
|
788 | |||
786 | def _compute_key_hashes(self, repo) -> Tuple[bytes]: |
|
789 | def _compute_key_hashes(self, repo) -> Tuple[bytes]: | |
787 | """return the cache key hashes that match this repoview state""" |
|
790 | """return the cache key hashes that match this repoview state""" | |
788 | filtered_hash = scmutil.combined_filtered_and_obsolete_hash( |
|
791 | filtered_hash = scmutil.combined_filtered_and_obsolete_hash( | |
789 | repo, |
|
792 | repo, | |
790 | self.tiprev, |
|
793 | self.tiprev, | |
791 | needobsolete=True, |
|
794 | needobsolete=True, | |
792 | ) |
|
795 | ) | |
793 | keys: Tuple[bytes] = cast(Tuple[bytes], ()) |
|
796 | keys: Tuple[bytes] = cast(Tuple[bytes], ()) | |
794 | if filtered_hash is not None: |
|
797 | if filtered_hash is not None: | |
795 | keys: Tuple[bytes] = (filtered_hash,) |
|
798 | keys: Tuple[bytes] = (filtered_hash,) | |
796 | return keys |
|
799 | return keys | |
797 |
|
800 | |||
798 |
|
801 | |||
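To make the serialization described above concrete, here is a small standalone sketch that parses a V2 header line the same way `_load_header` does; the node value is invented for illustration and `binascii.unhexlify` stands in for `mercurial.node.bin`:

from binascii import unhexlify  # stand-in for mercurial.node.bin

# invented first line of a .hg/cache/branch2 file (no filtered-repo hash here)
sample = b"0123456789abcdef0123456789abcdef01234567 42\n"
cachekey = sample.rstrip(b'\n').split(b" ", 2)
tipnode, tiprev = unhexlify(cachekey[0]), int(cachekey[1])
key_hashes = (unhexlify(cachekey[2]),) if len(cachekey) > 2 else ()
assert tiprev == 42 and len(tipnode) == 20 and key_hashes == ()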
class BranchCacheV3(_LocalBranchCache):
    """a branch cache using version 3 of the format on disk

    This version is still EXPERIMENTAL and the format is subject to changes.

    The cache is serialized on disk in the following format:

    <cache-key-xxx>=<xxx-value> <cache-key-yyy>=<yyy-value> […]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. It is a
    series of key-value pairs. The following keys are recognized:

    - tip-rev: the rev-num of the tip-most revision seen by this cache
    - tip-node: the node-id of the tip-most revision seen by this cache
    - filtered-hash: the hash of all filtered revisions (before tip-rev)
      ignored by this cache.
    - obsolete-hash: the hash of all non-filtered obsolete revisions (before
      tip-rev) ignored by this cache.

    The tip-rev is used to know how far behind the values in the file are
    compared to the current repository state.

    The tip-node, filtered-hash and obsolete-hash are used to detect if this
    cache can be used for this repository state at all.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.

    Topological heads are not included in the listing and should be dispatched
    on the right branch at read time. Obsolete topological heads should be
    ignored.
    """

    _base_filename = b"branch3"
    _default_key_hashes = (None, None)

    def _get_topo_heads(self, repo) -> List[int]:
        """return the topological heads of a repoview's content up to self.tiprev"""
        cl = repo.changelog
        if self.tiprev == nullrev:
            return []
        elif self.tiprev == cl.tiprev():
            return cl.headrevs()
        else:
            # XXX passing tiprev as ceiling of cl.headrevs could be faster
            heads = cl.headrevs(cl.revs(stop=self.tiprev))
            return heads

    def _write_header(self, fp) -> None:
        cache_keys = {
            b"tip-node": hex(self.tipnode),
            b"tip-rev": b'%d' % self.tiprev,
        }
        if self.key_hashes:
            if self.key_hashes[0] is not None:
                cache_keys[b"filtered-hash"] = hex(self.key_hashes[0])
            if self.key_hashes[1] is not None:
                cache_keys[b"obsolete-hash"] = hex(self.key_hashes[1])
        pieces = (b"%s=%s" % i for i in sorted(cache_keys.items()))
        fp.write(b" ".join(pieces) + b'\n')

    def _write_heads(self, repo, fp) -> int:
        """write list of heads to a file

        Return the number of heads written."""
        nodecount = 0
        topo_heads = set(self._get_topo_heads(repo))
        to_rev = repo.changelog.index.rev
        for label, nodes in sorted(self._entries.items()):
            label = encoding.fromlocal(label)
            for node in nodes:
                rev = to_rev(node)
                if rev in topo_heads:
                    continue
                if node in self._closednodes:
                    state = b'c'
                else:
                    state = b'o'
                nodecount += 1
                fp.write(b"%s %s %s\n" % (hex(node), state, label))
        return nodecount

    @classmethod
    def _load_header(cls, repo, lineiter):
        header_line = next(lineiter)
        pieces = header_line.rstrip(b'\n').split(b" ")
        cache_keys = dict(p.split(b'=', 1) for p in pieces)

        args = {}
        filtered_hash = None
        obsolete_hash = None
        for k, v in cache_keys.items():
            if k == b"tip-rev":
                args["tiprev"] = int(v)
            elif k == b"tip-node":
                args["tipnode"] = bin(v)
            elif k == b"filtered-hash":
                filtered_hash = bin(v)
            elif k == b"obsolete-hash":
                obsolete_hash = bin(v)
            else:
                msg = b"unknown cache key: %r" % k
                raise ValueError(msg)
        args["key_hashes"] = (filtered_hash, obsolete_hash)
        return args

    def _load_heads(self, repo, lineiter):
        """fully loads the branchcache by reading from the file using the line
        iterator passed"""
        super()._load_heads(repo, lineiter)
        cl = repo.changelog
        getbranchinfo = repo.revbranchcache().branchinfo
        obsrevs = obsolete.getrevs(repo, b'obsolete')
        to_node = cl.node
        touched_branch = set()
        for head in self._get_topo_heads(repo):
            if head in obsrevs:
                continue
            node = to_node(head)
            branch, closed = getbranchinfo(head)
            self._entries.setdefault(branch, []).append(node)
            if closed:
                self._closednodes.add(node)
            touched_branch.add(branch)
        to_rev = cl.index.rev
        for branch in touched_branch:
            self._entries[branch].sort(key=to_rev)

    def _compute_key_hashes(self, repo) -> Tuple[bytes]:
        """return the cache key hashes that match this repoview state"""
        return scmutil.filtered_and_obsolete_hash(
            repo,
            self.tiprev,
        )


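As a concrete illustration of the V3 header described in the class docstring above, a short standalone sketch that splits a header line into its cache keys the same way `_load_header` does; all values are invented:

# invented first line of a .hg/cache/branch3 file
sample = b"tip-node=0123456789abcdef0123456789abcdef01234567 tip-rev=42\n"
pieces = sample.rstrip(b'\n').split(b" ")
cache_keys = dict(p.split(b'=', 1) for p in pieces)
assert cache_keys[b"tip-rev"] == b"42"
# filtered-hash=<hex> and obsolete-hash=<hex> appear as extra pairs when present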
class remotebranchcache(_BaseBranchCache):
    """Branchmap info for a remote connection, should not write locally"""

    def __init__(
        self,
        repo: "localrepo.localrepository",
        entries: Union[
            Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
        ] = (),
        closednodes: Optional[Set[bytes]] = None,
    ) -> None:
        super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)


# Revision branch info cache

_rbcversion = b'-v1'
_rbcnames = b'rbc-names' + _rbcversion
_rbcrevs = b'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with the top bit indicating a closed head]
_rbcrecfmt = b'>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcmininc = 64 * _rbcrecsize
_rbcnodelen = 4
_rbcbranchidxmask = 0x7FFFFFFF
_rbccloseflag = 0x80000000


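For clarity, a small standalone sketch of how a single rbc-revs record round-trips through the '>4sI' layout above (a 4-byte node prefix plus a 4-byte branch index whose top bit marks a closed head); the values are invented:

import struct

RECFMT = b'>4sI'          # same layout as _rbcrecfmt
CLOSEFLAG = 0x80000000    # same value as _rbccloseflag
IDXMASK = 0x7FFFFFFF      # same value as _rbcbranchidxmask

# pack an invented record: node prefix b'\xde\xad\xbe\xef', branch index 3, closed head
rec = struct.pack(RECFMT, b'\xde\xad\xbe\xef', 3 | CLOSEFLAG)
node_prefix, branchidx = struct.unpack(RECFMT, rec)
close = bool(branchidx & CLOSEFLAG)
branchidx &= IDXMASK
assert (node_prefix, branchidx, close) == (b'\xde\xad\xbe\xef', 3, True)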
class rbcrevs:
    """a byte string consisting of an immutable prefix followed by a mutable suffix"""

    def __init__(self, revs):
        self._prefix = revs
        self._rest = bytearray()

    def __len__(self):
        return len(self._prefix) + len(self._rest)

    def unpack_record(self, rbcrevidx):
        if rbcrevidx < len(self._prefix):
            return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx)
        else:
            return unpack_from(
                _rbcrecfmt,
                util.buffer(self._rest),
                rbcrevidx - len(self._prefix),
            )

    def make_mutable(self):
        if len(self._prefix) > 0:
            entirety = bytearray()
            entirety[:] = self._prefix
            entirety.extend(self._rest)
            self._rest = entirety
            self._prefix = bytearray()

    def truncate(self, pos):
        self.make_mutable()
        del self._rest[pos:]

    def pack_into(self, rbcrevidx, node, branchidx):
        if rbcrevidx < len(self._prefix):
            self.make_mutable()
        buf = self._rest
        start_offset = rbcrevidx - len(self._prefix)
        end_offset = start_offset + _rbcrecsize

        if len(self._rest) < end_offset:
            # bytearray doesn't allocate extra space at least in Python 3.7.
            # When multiple changesets are added in a row, precise resize would
            # result in quadratic complexity. Overallocate to compensate by
            # using the classic doubling technique for dynamic arrays instead.
            # If there was a gap in the map before, less space will be reserved.
            self._rest.extend(b'\0' * end_offset)
        return pack_into(
            _rbcrecfmt,
            buf,
            start_offset,
            node,
            branchidx,
        )

    def extend(self, extension):
        return self._rest.extend(extension)

    def slice(self, begin, end):
        if begin < len(self._prefix):
            acc = bytearray()
            acc[:] = self._prefix[begin:end]
            acc.extend(
                self._rest[begin - len(self._prefix) : end - len(self._prefix)]
            )
            return acc
        return self._rest[begin - len(self._prefix) : end - len(self._prefix)]


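A minimal usage sketch of the prefix/suffix split in rbcrevs, assuming the class and the record constants above are in scope; it stores one record past the immutable prefix and reads it back:

# illustration only: one all-zero record as the read-only prefix
revs = rbcrevs(bytes(_rbcrecsize))
# the record for "rev 1" lands in the mutable suffix
revs.pack_into(1 * _rbcrecsize, b'\xca\xfe\xba\xbe', 7)
node_prefix, branchidx = revs.unpack_record(1 * _rbcrecsize)
assert (node_prefix, branchidx) == (b'\xca\xfe\xba\xbe', 7)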
class revbranchcache:
    """Persistent cache, mapping from revision number to branch name and close flag.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs file will thus still give the right
    result while converging towards full recovery ... assuming no incorrectly
    matching node hashes.
    The record also contains 4 bytes where 31 bits contain the index of the
    branch and the last bit indicates that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        self._names = []  # branch names in local encoding with static index
        self._rbcrevs = rbcrevs(bytearray())
        self._rbcsnameslen = 0  # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata)  # for verification before writing
            if bndata:
                self._names = [
                    encoding.tolocal(bn) for bn in bndata.split(b'\0')
                ]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                if repo.ui.configbool(b'storage', b'revbranchcache.mmap'):
                    with repo.cachevfs(_rbcrevs) as fp:
                        data = util.buffer(util.mmapread(fp))
                else:
                    data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs = rbcrevs(data)
            except (IOError, OSError) as inst:
                repo.ui.debug(
                    b"couldn't read revision branch cache: %s\n"
                    % stringutil.forcebytestr(inst)
                )
        # remember number of good records on disk
        self._rbcrevslen = min(
            len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
        )
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names)  # number of names read at
        # _rbcsnameslen

    def _clear(self):
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize))
        util.clearcachedproperty(self, b'_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        return {b: r for r, b in enumerate(self._names)}

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == b'\0\0\0\0':
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug(
                    b"referenced branch names not found"
                    b" - rebuilding revision branch cache from scratch\n"
                )
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug(
                b"history modification detected - truncating "
                b"revision branch cache to revision %d\n" % rev
            )
            truncate = rbcrevidx + _rbcrecsize
            self._rbcrevs.truncate(truncate)
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, rev, changelogrevision):
        """add new branch information to the cache"""
        branch, close = changelogrevision.branchinfo

        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
        # If no cache data were readable (none exists, bad permissions, etc.),
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if 'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        self._rbcrevs.pack_into(rbcrevidx, node, branchidx)
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize(b'write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = b''
        try:
            # write the new names
            if self._rbcnamescount < len(self._names):
                wlock = repo.wlock(wait=False)
                step = b' names'
                self._writenames(repo)

            # write the new revs
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = b''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                self._writerevs(repo, start)

        except (IOError, OSError, error.Abort, error.LockError) as inst:
            repo.ui.debug(
                b"couldn't write revision branch cache%s: %s\n"
                % (step, stringutil.forcebytestr(inst))
            )
        finally:
            if wlock is not None:
                wlock.release()

    def _writenames(self, repo):
        """write the new branch names to revbranchcache"""
        if self._rbcnamescount != 0:
            f = repo.cachevfs.open(_rbcnames, b'ab')
            if f.tell() == self._rbcsnameslen:
                f.write(b'\0')
            else:
                f.close()
                repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
                self._rbcnamescount = 0
                self._rbcrevslen = 0
        if self._rbcnamescount == 0:
            # before rewriting names, make sure references are removed
            repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
            f = repo.cachevfs.open(_rbcnames, b'wb')
        f.write(
            b'\0'.join(
                encoding.fromlocal(b)
                for b in self._names[self._rbcnamescount :]
            )
        )
        self._rbcsnameslen = f.tell()
        f.close()
        self._rbcnamescount = len(self._names)

    def _writerevs(self, repo, start):
        """write the new revs to revbranchcache"""
        revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
        with repo.cachevfs.open(_rbcrevs, b'ab') as f:
            if f.tell() != start:
                repo.ui.debug(
                    b"truncating cache/%s to %d\n" % (_rbcrevs, start)
                )
                f.seek(start)
                if f.tell() != start:
                    start = 0
                    f.seek(start)
                f.truncate()
            end = revs * _rbcrecsize
            f.write(self._rbcrevs.slice(start, end))
            self._rbcrevslen = revs
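
For orientation, a hedged sketch of how this cache is typically consumed, mirroring the `repo.revbranchcache().branchinfo` call used by `_load_heads` above; `repo` is assumed to be an unfiltered localrepository:

rbc = repo.revbranchcache()                 # shared revision branch cache instance
branch, closed = rbc.branchinfo(repo.changelog.tiprev())
rbc.write()                                 # persist any newly cached records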